#!/usr/bin/env bash set -e set -o pipefail MIN_BAZEL_VERSION=0.4.5 # Find out the absolute path to where ./configure resides pushd `dirname $0` > /dev/null SOURCE_BASE_DIR=`pwd -P` popd > /dev/null PLATFORM="$(uname -s | tr 'A-Z' 'a-z')" function is_linux() { [[ "${PLATFORM}" == "linux" ]] } function is_macos() { [[ "${PLATFORM}" == "darwin" ]] } function is_windows() { # On windows, the shell script is actually running in msys [[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]] } function is_ppc64le() { [[ "$(uname -m)" == "ppc64le" ]] } function sed_in_place() { sed -e $1 $2 > "$2.bak" mv "$2.bak" $2 } function write_to_bazelrc() { echo "$1" >> .tf_configure.bazelrc } function write_action_env_to_bazelrc() { write_to_bazelrc "build --action_env $1=\"$2\"" } function python_path { "$PYTHON_BIN_PATH" - <&2 if [ -z "$fromuser" ]; then exit 1 fi PYTHON_BIN_PATH="" # Retry done if [ -z "$PYTHON_LIB_PATH" ]; then # Split python_path into an array of paths, this allows path containing spaces IFS=',' read -r -a python_lib_path <<< "$(python_path)" if [ 1 = "$USE_DEFAULT_PYTHON_LIB_PATH" ]; then PYTHON_LIB_PATH=${python_lib_path[0]} echo "Using python library path: $PYTHON_LIB_PATH" else echo "Found possible Python library paths:" for x in "${python_lib_path[@]}"; do echo " $x" done set -- "${python_lib_path[@]}" echo "Please input the desired Python library path to use. Default is [$1]" read b || true if [ "$b" == "" ]; then PYTHON_LIB_PATH=${python_lib_path[0]} echo "Using python library path: $PYTHON_LIB_PATH" else PYTHON_LIB_PATH="$b" fi fi fi if [ ! -x "$PYTHON_BIN_PATH" ] || [ -d "$PYTHON_BIN_PATH" ]; then echo "PYTHON_BIN_PATH is not executable. Is it the python binary?" exit 1 fi local python_major_version python_major_version=$("${PYTHON_BIN_PATH}" -c 'from __future__ import print_function; import sys; print(sys.version_info[0]);' | head -c1) if [ -z "$python_major_version" ]; then echo -e "\n\nERROR: Problem getting python version. Is $PYTHON_BIN_PATH the correct python binary?" exit 1 fi # Convert python path to Windows style before writing into bazel.rc if is_windows; then PYTHON_BIN_PATH="$(cygpath -m "$PYTHON_BIN_PATH")" PYTHON_LIB_PATH="$(cygpath -m "$PYTHON_LIB_PATH")" fi # Set-up env variables used by python_configure.bzl write_action_env_to_bazelrc "PYTHON_BIN_PATH" "$PYTHON_BIN_PATH" write_action_env_to_bazelrc "PYTHON_LIB_PATH" "$PYTHON_LIB_PATH" write_to_bazelrc "build --define PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\"" write_to_bazelrc "build --define PYTHON_LIB_PATH=\"$PYTHON_LIB_PATH\"" write_to_bazelrc "build --force_python=py$python_major_version" write_to_bazelrc "build --host_force_python=py$python_major_version" write_to_bazelrc "build --python${python_major_version}_path=\"$PYTHON_BIN_PATH\"" write_to_bazelrc "test --force_python=py$python_major_version" write_to_bazelrc "test --host_force_python=py$python_major_version" write_to_bazelrc "test --define PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\"" write_to_bazelrc "test --define PYTHON_LIB_PATH=\"$PYTHON_LIB_PATH\"" write_to_bazelrc "run --define PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\"" write_to_bazelrc "run --define PYTHON_LIB_PATH=\"$PYTHON_LIB_PATH\"" # Write tools/python_bin_path.sh echo "export PYTHON_BIN_PATH=\"$PYTHON_BIN_PATH\"" > tools/python_bin_path.sh } function version { echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'; } bazel version > bazel.version set +e curr_bazel_version=$(grep -m 1 'Build label:' bazel.version | cut -d ' ' -f3) set -e rm -f bazel.version echo "You have bazel $curr_bazel_version installed." if [ -z "$curr_bazel_version" ]; then echo "WARNING: current bazel installation is not a release version." echo "Make sure you are running at least bazel $MIN_BAZEL_VERSION." elif [ "$(version "$MIN_BAZEL_VERSION")" -gt "$(version "$curr_bazel_version")" ]; then echo "Please upgrade your bazel installation to version $MIN_BAZEL_VERSION or higher to build TensorFlow!" echo "Exiting..." exit 1 fi # This file contains customized config settings. rm -f .tf_configure.bazelrc touch .tf_configure.bazelrc if [[ ! -e .bazelrc ]]; then if [[ -e "${HOME}/.bazelrc" ]]; then echo "import ${HOME}/.bazelrc" >.bazelrc else touch .bazelrc fi fi sed_in_place "/tf_configure/d" .bazelrc echo "import %workspace%/.tf_configure.bazelrc" >> .bazelrc # Delete any leftover BUILD files from the Makefile build, which would interfere # with Bazel parsing. MAKEFILE_DOWNLOAD_DIR=tensorflow/contrib/makefile/downloads if [ -d "${MAKEFILE_DOWNLOAD_DIR}" ]; then find ${MAKEFILE_DOWNLOAD_DIR} -type f -name '*BUILD' -delete fi setup_python ## Set up MKL related environment settings write_to_bazelrc 'build:mkl --define with_mkl_support=true' write_to_bazelrc 'build:mkl --define using_mkl=true' write_to_bazelrc 'build:mkl -c opt' write_to_bazelrc 'build:mkl --copt="-DEIGEN_USE_VML"' echo "" echo "Add \"--config=mkl\" to your bazel command to build with MKL support." echo "Please note that MKL on MacOS or windows is still not supported." echo "If you would like to use a local MKL instead of downloading, please " echo " set the environment variable \"TF_MKL_ROOT\" every time before build." echo "" ## End MKL setup ## Set up architecture-dependent optimization flags. if [ -z "$CC_OPT_FLAGS" ]; then if is_ppc64le; then # gcc on ppc64le does not support -march, use mcpu instead default_cc_opt_flags="-mcpu=native" else default_cc_opt_flags="-march=native" fi read -p "Please specify optimization flags to use during compilation when bazel option "\ "\"--config=opt\" is specified [Default is $default_cc_opt_flags]: " CC_OPT_FLAGS if [ -z "$CC_OPT_FLAGS" ]; then CC_OPT_FLAGS=$default_cc_opt_flags fi fi if is_windows; then TF_NEED_GCP=0 TF_NEED_HDFS=0 TF_NEED_JEMALLOC=0 TF_NEED_OPENCL=0 TF_CUDA_CLANG=0 fi if is_linux; then while [ "$TF_NEED_JEMALLOC" == "" ]; do read -p "Do you wish to use jemalloc as the malloc implementation? [Y/n] "\ INPUT case $INPUT in [Yy]* ) echo "jemalloc enabled"; TF_NEED_JEMALLOC=1;; [Nn]* ) echo "jemalloc disabled"; TF_NEED_JEMALLOC=0;; "" ) echo "jemalloc enabled"; TF_NEED_JEMALLOC=1;; * ) echo "Invalid selection: " $INPUT;; esac done else TF_NEED_JEMALLOC=0 fi if [[ "$TF_NEED_JEMALLOC" == "1" ]]; then write_to_bazelrc 'build --define with_jemalloc=true' fi while [[ "$TF_NEED_GCP" == "" ]]; do read -p "Do you wish to build TensorFlow with "\ "Google Cloud Platform support? [y/N] " INPUT case $INPUT in [Yy]* ) echo "Google Cloud Platform support will be enabled for "\ "TensorFlow"; TF_NEED_GCP=1;; [Nn]* ) echo "No Google Cloud Platform support will be enabled for "\ "TensorFlow"; TF_NEED_GCP=0;; "" ) echo "No Google Cloud Platform support will be enabled for "\ "TensorFlow"; TF_NEED_GCP=0;; * ) echo "Invalid selection: " $INPUT;; esac done if [[ "$TF_NEED_GCP" == "1" ]]; then write_to_bazelrc 'build --define with_gcp_support=true' fi while [[ "$TF_NEED_HDFS" == "" ]]; do read -p "Do you wish to build TensorFlow with "\ "Hadoop File System support? [y/N] " INPUT case $INPUT in [Yy]* ) echo "Hadoop File System support will be enabled for "\ "TensorFlow"; TF_NEED_HDFS=1;; [Nn]* ) echo "No Hadoop File System support will be enabled for "\ "TensorFlow"; TF_NEED_HDFS=0;; "" ) echo "No Hadoop File System support will be enabled for "\ "TensorFlow"; TF_NEED_HDFS=0;; * ) echo "Invalid selection: " $INPUT;; esac done if [[ "$TF_NEED_HDFS" == "1" ]]; then write_to_bazelrc 'build --define with_hdfs_support=true' fi ## Enable XLA. while [[ "$TF_ENABLE_XLA" == "" ]]; do read -p "Do you wish to build TensorFlow with the XLA just-in-time compiler (experimental)? [y/N] " INPUT case $INPUT in [Yy]* ) echo "XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=1;; [Nn]* ) echo "No XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=0;; "" ) echo "No XLA support will be enabled for TensorFlow"; TF_ENABLE_XLA=0;; * ) echo "Invalid selection: " $INPUT;; esac done if [[ "$TF_ENABLE_XLA" == "1" ]]; then write_to_bazelrc 'build --define with_xla_support=true' fi # Verbs configuration while [ "$TF_NEED_VERBS" == "" ]; do read -p "Do you wish to build TensorFlow with "\ "VERBS support? [y/N] " INPUT case $INPUT in [Yy]* ) echo "VERBS support will be enabled for "\ "TensorFlow"; TF_NEED_VERBS=1;; [Nn]* ) echo "No VERBS support will be enabled for "\ "TensorFlow"; TF_NEED_VERBS=0;; "" ) echo "No VERBS support will be enabled for "\ "TensorFlow"; TF_NEED_VERBS=0;; * ) echo "Invalid selection: " $INPUT;; esac done if [[ "$TF_NEED_VERBS" == "1" ]]; then write_to_bazelrc 'build --define with_verbs_support=true' fi # Append CC optimization flags to bazel.rc for opt in $CC_OPT_FLAGS; do write_to_bazelrc "build:opt --cxxopt=$opt --copt=$opt" done # Run the gen_git_source to create links where bazel can track dependencies for # git hash propagation GEN_GIT_SOURCE=tensorflow/tools/git/gen_git_source.py chmod a+x ${GEN_GIT_SOURCE} "${PYTHON_BIN_PATH}" ${GEN_GIT_SOURCE} --configure "${SOURCE_BASE_DIR}" ## Set up SYCL-related environment settings while [ "$TF_NEED_OPENCL" == "" ]; do read -p "Do you wish to build TensorFlow with OpenCL support? [y/N] " INPUT case $INPUT in [Yy]* ) echo "OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=1;; [Nn]* ) echo "No OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=0;; "" ) echo "No OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=0;; * ) echo "Invalid selection: " $INPUT;; esac done ## Set up Cuda-related environment settings while [ "$TF_NEED_CUDA" == "" ]; do read -p "Do you wish to build TensorFlow with CUDA support? [y/N] " INPUT case $INPUT in [Yy]* ) echo "CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=1;; [Nn]* ) echo "No CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=0;; "" ) echo "No CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=0;; * ) echo "Invalid selection: " $INPUT;; esac done export TF_NEED_CUDA write_action_env_to_bazelrc "TF_NEED_CUDA" "$TF_NEED_CUDA" export TF_NEED_OPENCL write_action_env_to_bazelrc "TF_NEED_OPENCL" "$TF_NEED_OPENCL" if [ "$TF_NEED_CUDA" == "1" ]; then while [[ "$TF_CUDA_CLANG" == "" ]]; do read -p "Do you want to use clang as CUDA compiler? [y/N] " INPUT case $INPUT in [Yy]* ) echo "Clang will be used as CUDA compiler"; TF_CUDA_CLANG=1;; [Nn]* ) echo "nvcc will be used as CUDA compiler"; TF_CUDA_CLANG=0;; "" ) echo "nvcc will be used as CUDA compiler"; TF_CUDA_CLANG=0;; * ) echo "Invalid selection: " $INPUT;; esac done export TF_CUDA_CLANG write_action_env_to_bazelrc "TF_CUDA_CLANG" "$TF_CUDA_CLANG" # Set up which clang we should use as the cuda / host compiler. while [[ "$TF_CUDA_CLANG" == "1" ]] && true; do fromuser="" if [ -z "$CLANG_CUDA_COMPILER_PATH" ]; then default_clang_host_compiler_path=$(which clang || true) read -p "Please specify which clang should be used as device and host compiler. [Default is $default_clang_host_compiler_path]: " CLANG_CUDA_COMPILER_PATH fromuser="1" if [ -z "$CLANG_CUDA_COMPILER_PATH" ]; then CLANG_CUDA_COMPILER_PATH="$default_clang_host_compiler_path" fi fi if [ -e "$CLANG_CUDA_COMPILER_PATH" ]; then export CLANG_CUDA_COMPILER_PATH write_action_env_to_bazelrc "CLANG_CUDA_COMPILER_PATH" "$CLANG_CUDA_COMPILER_PATH" break fi echo "Invalid clang path. ${CLANG_CUDA_COMPILER_PATH} cannot be found" 1>&2 if [ -z "$fromuser" ]; then exit 1 fi CLANG_CUDA_COMPILER_PATH="" # Retry done # Find out where the CUDA toolkit is installed while true; do # Configure the Cuda SDK version to use. if [ -z "$TF_CUDA_VERSION" ]; then read -p "Please specify the CUDA SDK version you want to use, e.g. 7.0. [Leave empty to default to CUDA 8.0]: " TF_CUDA_VERSION fi # Set default CUDA version if not set TF_CUDA_VERSION=${TF_CUDA_VERSION:-8.0} fromuser="" if [ -z "$CUDA_TOOLKIT_PATH" ]; then default_cuda_path=/usr/local/cuda if is_windows; then if [ -z "$CUDA_PATH" ]; then default_cuda_path="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0" else default_cuda_path="$(cygpath -m "$CUDA_PATH")" fi elif is_linux; then # If the default doesn't exist, try an alternative default. if [ ! -d $default_cuda_path ] && [ -d /opt/cuda ]; then default_cuda_path=/opt/cuda fi fi read -p "Please specify the location where CUDA $TF_CUDA_VERSION toolkit is installed. Refer to README.md for more details. [Default is $default_cuda_path]: " CUDA_TOOLKIT_PATH fromuser="1" if [ -z "$CUDA_TOOLKIT_PATH" ]; then CUDA_TOOLKIT_PATH="$default_cuda_path" fi fi if [[ -z "$TF_CUDA_VERSION" ]]; then TF_CUDA_EXT="" else TF_CUDA_EXT=".$TF_CUDA_VERSION" fi if is_windows; then CUDA_RT_LIB_PATH="lib/x64/cudart.lib" elif is_linux; then CUDA_RT_LIB_PATH="lib64/libcudart.so${TF_CUDA_EXT}" elif is_macos; then CUDA_RT_LIB_PATH="lib/libcudart${TF_CUDA_EXT}.dylib" fi if [ -e "${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH}" ]; then export CUDA_TOOLKIT_PATH write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "$CUDA_TOOLKIT_PATH" export TF_CUDA_VERSION break fi echo "Invalid path to CUDA $TF_CUDA_VERSION toolkit. ${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH} cannot be found" if [ -z "$fromuser" ]; then exit 1 fi # Retry TF_CUDA_VERSION="" CUDA_TOOLKIT_PATH="" done export TF_CUDA_VERSION write_action_env_to_bazelrc "TF_CUDA_VERSION" "$TF_CUDA_VERSION" # Set up which gcc nvcc should use as the host compiler # No need to set this on Windows while [[ "$TF_CUDA_CLANG" != "1" ]] && ! is_windows && true; do fromuser="" if [ -z "$GCC_HOST_COMPILER_PATH" ]; then default_gcc_host_compiler_path=$(which gcc || true) cuda_bin_symlink="$CUDA_TOOLKIT_PATH/bin/gcc" if [ -L "$cuda_bin_symlink" ]; then default_gcc_host_compiler_path=$(readlink $cuda_bin_symlink) fi read -p "Please specify which gcc should be used by nvcc as the host compiler. [Default is $default_gcc_host_compiler_path]: " GCC_HOST_COMPILER_PATH fromuser="1" if [ -z "$GCC_HOST_COMPILER_PATH" ]; then GCC_HOST_COMPILER_PATH="$default_gcc_host_compiler_path" fi fi if [ -e "$GCC_HOST_COMPILER_PATH" ]; then export GCC_HOST_COMPILER_PATH write_action_env_to_bazelrc "GCC_HOST_COMPILER_PATH" "$GCC_HOST_COMPILER_PATH" break fi echo "Invalid gcc path. ${GCC_HOST_COMPILER_PATH} cannot be found" 1>&2 if [ -z "$fromuser" ]; then exit 1 fi GCC_HOST_COMPILER_PATH="" # Retry done # Find out where the cuDNN library is installed while true; do # Configure the cuDNN version to use. if [ -z "$TF_CUDNN_VERSION" ]; then read -p "Please specify the cuDNN version you want to use. [Leave empty to default to cuDNN 6.0]: " TF_CUDNN_VERSION fi # Set default CUDNN version if not set TF_CUDNN_VERSION=${TF_CUDNN_VERSION:-6} fromuser="" if [ -z "$CUDNN_INSTALL_PATH" ]; then default_cudnn_path=${CUDA_TOOLKIT_PATH} read -p "Please specify the location where cuDNN $TF_CUDNN_VERSION library is installed. Refer to README.md for more details. [Default is $default_cudnn_path]: " CUDNN_INSTALL_PATH fromuser="1" if [ -z "$CUDNN_INSTALL_PATH" ]; then CUDNN_INSTALL_PATH=$default_cudnn_path fi # Result returned from "read" will be used unexpanded. That make "~" unusable. # Going through one more level of expansion to handle that. CUDNN_INSTALL_PATH=`"${PYTHON_BIN_PATH}" -c "import os; print(os.path.realpath(os.path.expanduser('${CUDNN_INSTALL_PATH}')))"` if is_windows; then CUDNN_INSTALL_PATH="$(cygpath -m "$CUDNN_INSTALL_PATH")" fi fi if [[ -z "$TF_CUDNN_VERSION" ]]; then TF_CUDNN_EXT="" else TF_CUDNN_EXT=".$TF_CUDNN_VERSION" fi if is_windows; then CUDA_DNN_LIB_PATH="lib/x64/cudnn.lib" CUDA_DNN_LIB_ALT_PATH="lib/x64/cudnn.lib" elif is_linux; then CUDA_DNN_LIB_PATH="lib64/libcudnn.so${TF_CUDNN_EXT}" CUDA_DNN_LIB_ALT_PATH="libcudnn.so${TF_CUDNN_EXT}" elif is_macos; then CUDA_DNN_LIB_PATH="lib/libcudnn${TF_CUDNN_EXT}.dylib" CUDA_DNN_LIB_ALT_PATH="libcudnn${TF_CUDNN_EXT}.dylib" fi if [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_ALT_PATH}" ] || [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_PATH}" ]; then export TF_CUDNN_VERSION write_action_env_to_bazelrc "TF_CUDNN_VERSION" "$TF_CUDNN_VERSION" export CUDNN_INSTALL_PATH write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "$CUDNN_INSTALL_PATH" break fi if is_linux; then if ! type ldconfig > /dev/null 2>&1; then LDCONFIG_BIN=/sbin/ldconfig else LDCONFIG_BIN=ldconfig fi CUDNN_PATH_FROM_LDCONFIG="$($LDCONFIG_BIN -p | sed -n 's/.*libcudnn.so .* => \(.*\)/\1/p')" if [ -e "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}" ]; then export TF_CUDNN_VERSION export CUDNN_INSTALL_PATH CUDNN_INSTALL_PATH="$(dirname ${CUDNN_PATH_FROM_LDCONFIG})" write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "$CUDNN_INSTALL_PATH" break fi fi echo "Invalid path to cuDNN ${CUDNN_VERSION} toolkit. Neither of the following two files can be found:" echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_PATH}" echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_ALT_PATH}" if is_linux; then echo "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}" fi if [ -z "$fromuser" ]; then exit 1 fi # Retry TF_CUDNN_VERSION="" CUDNN_INSTALL_PATH="" done export TF_CUDNN_VERSION write_action_env_to_bazelrc "TF_CUDNN_VERSION" "$TF_CUDNN_VERSION" # Configure the compute capabilities that TensorFlow builds for. # Since Cuda toolkit is not backward-compatible, this is not guaranteed to work. function get_native_cuda_compute_capabilities { device_query_bin="$CUDA_TOOLKIT_PATH/extras/demo_suite/deviceQuery" # Also works on Windows without .exe "$device_query_bin" | grep 'Capability' | grep -o '[0-9]*\.[0-9]*' | sed ':a;{N;s/\n/,/};ba' exit 0 # ensure that this function always exit success even if device detection fails, to prevent the whole configure from aborting } while true; do fromuser="" native_cuda_compute_capabilities=$(get_native_cuda_compute_capabilities) default_cuda_compute_capabilities=${native_cuda_compute_capabilities:-"3.5,5.2"} if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then cat << EOF Please specify a list of comma-separated Cuda compute capabilities you want to build with. You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus. Please note that each additional compute capability significantly increases your build time and binary size. EOF read -p "[Default is: \"$default_cuda_compute_capabilities\"]: " TF_CUDA_COMPUTE_CAPABILITIES fromuser=1 fi if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then TF_CUDA_COMPUTE_CAPABILITIES=$default_cuda_compute_capabilities fi # Check whether all capabilities from the input is valid COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES//,/ } ALL_VALID=1 for CAPABILITY in $COMPUTE_CAPABILITIES; do if [[ ! "$CAPABILITY" =~ [0-9]+.[0-9]+ ]]; then echo "Invalid compute capability: " $CAPABILITY ALL_VALID=0 break fi done if [ "$ALL_VALID" == "0" ]; then if [ -z "$fromuser" ]; then exit 1 fi else export TF_CUDA_COMPUTE_CAPABILITIES write_action_env_to_bazelrc "TF_CUDA_COMPUTE_CAPABILITIES" "$TF_CUDA_COMPUTE_CAPABILITIES" break fi TF_CUDA_COMPUTE_CAPABILITIES="" done if is_windows; then # The following three variables are needed for MSVC toolchain configuration in Bazel export CUDA_PATH="$CUDA_TOOLKIT_PATH" export CUDA_COMPUTE_CAPABILITIES="$TF_CUDA_COMPUTE_CAPABILITIES" export NO_WHOLE_ARCHIVE_OPTION=1 write_action_env_to_bazelrc "CUDA_PATH" "$CUDA_PATH" write_action_env_to_bazelrc "CUDA_COMPUTE_CAPABILITIES" "$CUDA_COMPUTE_CAPABILITIES" write_action_env_to_bazelrc "NO_WHOLE_ARCHIVE_OPTION" "1" write_to_bazelrc "build --config=win-cuda" write_to_bazelrc "test --config=win-cuda" else # If CUDA is enabled, always use GPU during build and test. if [ "$TF_CUDA_CLANG" == "1" ]; then write_to_bazelrc "build --config=cuda_clang" write_to_bazelrc "test --config=cuda_clang" else write_to_bazelrc "build --config=cuda" write_to_bazelrc "test --config=cuda" fi fi # end of if "$TF_NEED_CUDA" == "1" fi # OpenCL configuration if [ "$TF_NEED_OPENCL" == "1" ]; then # Determine which C++ compiler should be used as the host compiler while true; do fromuser="" if [ -z "$HOST_CXX_COMPILER" ]; then default_cxx_host_compiler=$(which g++ || true) read -p "Please specify which C++ compiler should be used as the host C++ compiler. [Default is $default_cxx_host_compiler]: " HOST_CXX_COMPILER fromuser="1" if [ -z "$HOST_CXX_COMPILER" ]; then HOST_CXX_COMPILER=$default_cxx_host_compiler fi fi if [ -e "$HOST_CXX_COMPILER" ]; then export HOST_CXX_COMPILER write_action_env_to_bazelrc "HOST_CXX_COMPILER" "$HOST_CXX_COMPILER" break fi echo "Invalid C++ compiler path. ${HOST_CXX_COMPILER} cannot be found" 1>&2 if [ -z "$fromuser" ]; then exit 1 fi HOST_CXX_COMPILER="" # Retry done # Determine which C compiler should be used as the host compiler while true; do fromuser="" if [ -z "$HOST_C_COMPILER" ]; then default_c_host_compiler=$(which gcc || true) read -p "Please specify which C compiler should be used as the host C compiler. [Default is $default_c_host_compiler]: " HOST_C_COMPILER fromuser="1" if [ -z "$HOST_C_COMPILER" ]; then HOST_C_COMPILER=$default_c_host_compiler fi fi if [ -e "$HOST_C_COMPILER" ]; then export HOST_C_COMPILER write_action_env_to_bazelrc "HOST_C_COMPILER" "$HOST_C_COMPILER" break fi echo "Invalid C compiler path. ${HOST_C_COMPILER} cannot be found" 1>&2 if [ -z "$fromuser" ]; then exit 1 fi HOST_C_COMPILER="" # Retry done while true; do # Configure the OPENCL version to use. TF_OPENCL_VERSION="1.2" # Point to ComputeCpp root if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then default_computecpp_toolkit_path=/usr/local/computecpp read -p "Please specify the location where ComputeCpp for SYCL $TF_OPENCL_VERSION is installed. [Default is $default_computecpp_toolkit_path]: " COMPUTECPP_TOOLKIT_PATH fromuser="1" if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then COMPUTECPP_TOOLKIT_PATH=$default_computecpp_toolkit_path fi fi if is_linux; then SYCL_RT_LIB_PATH="lib/libComputeCpp.so" fi if [ -e "${COMPUTECPP_TOOLKIT_PATH}/${SYCL_RT_LIB_PATH}" ]; then export COMPUTECPP_TOOLKIT_PATH write_action_env_to_bazelrc "COMPUTECPP_TOOLKIT_PATH" "$COMPUTECPP_TOOLKIT_PATH" break fi echo "Invalid SYCL $TF_OPENCL_VERSION library path. ${COMPUTECPP_TOOLKIT_PATH}/${SYCL_RT_LIB_PATH} cannot be found" if [ -z "$fromuser" ]; then exit 1 fi # Retry TF_OPENCL_VERSION="" COMPUTECPP_TOOLKIT_PATH="" done # end of if "$TF_NEED_OPENCL" == "1" fi while [ "$TF_NEED_MPI" == "" ]; do read -p "Do you wish to build TensorFlow with "\ "MPI support? [y/N] " INPUT case $INPUT in [Yy]* ) echo "MPI support will be enabled for "\ "TensorFlow"; TF_NEED_MPI=1;; [Nn]* ) echo "MPI support will not be enabled for "\ "TensorFlow"; TF_NEED_MPI=0;; "" ) echo "MPI support will not be enabled for "\ "TensorFlow"; TF_NEED_MPI=0;; * ) echo "Invalid selection: " $INPUT;; esac done # Find out where the MPI toolkit is installed while true; do if [ "$TF_NEED_MPI" == "0" ]; then break; fi fromuser="" if [ -z "$MPI_HOME" ]; then #Get the base folder by removing the bin path default_mpi_path=$(dirname $(dirname $(which mpirun)) || dirname $(dirname $(which mpiexec)) || true) read -p "Please specify the MPI toolkit folder. [Default is $default_mpi_path]: " MPI_HOME fromuser="1" if [ -z "$MPI_HOME" ]; then MPI_HOME=$default_mpi_path fi fi #Check that the include and library folders are where we expect them to be if [ -e "$MPI_HOME/include" ] && [ -e "$MPI_HOME/lib" ]; then break fi echo "Invalid path to the MPI Toolkit. ${MPI_HOME}/include or ${MPI_HOME}/lib cannot be found." if [ -z "$fromuser" ]; then exit 1 fi # Retry MPI_HOME="" done if [ "$TF_NEED_MPI" == "1" ]; then write_to_bazelrc 'build --define with_mpi_support=true' #Link the MPI header files ln -sf "${MPI_HOME}/include/mpi.h" third_party/mpi/mpi.h #Determine if we use OpenMPI or MVAPICH, these require different header files #to be included here to make bazel dependency checker happy if [ -e "${MPI_HOME}/include/mpi_portable_platform.h" ]; then #OpenMPI ln -sf "${MPI_HOME}/include/mpi_portable_platform.h" third_party/mpi/ sed -i -e "s/MPI_LIB_IS_OPENMPI=False/MPI_LIB_IS_OPENMPI=True/" third_party/mpi/mpi.bzl else #MVAPICH / MPICH ln -sf "${MPI_HOME}/include/mpio.h" third_party/mpi/ ln -sf "${MPI_HOME}/include/mpicxx.h" third_party/mpi/ sed -i -e "s/MPI_LIB_IS_OPENMPI=True/MPI_LIB_IS_OPENMPI=False/" third_party/mpi/mpi.bzl fi if [ -e "${MPI_HOME}/lib/libmpi.so" ]; then ln -sf "${MPI_HOME}/lib/libmpi.so" third_party/mpi/ else echo "Cannot find the MPI library file in ${MPI_HOME}/lib " exit 1 fi fi echo "Configuration finished"