author     Eugene Brevdo <ebrevdo@gmail.com>                2016-03-10 17:18:30 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>  2016-03-11 11:41:23 -0800
commit     56f1d64998744ad655fe5c428658a13be35b865e (patch)
tree       1c4e5ec1192835898b9e17f462cf62838534add2 /tensorflow/tools
parent     64dd5b58d52d37697d5beb68e2177b966108e0a7 (diff)
Fix dependency bugs
Change: 116925769
Diffstat (limited to 'tensorflow/tools')
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.android                         |    7
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.cpu                              |    7
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu                |   14
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.gpu                              |    9
-rw-r--r--  tensorflow/tools/ci_build/README.md                                   |    2
-rwxr-xr-x  tensorflow/tools/ci_build/builds/configured                           |    2
-rwxr-xr-x  tensorflow/tools/ci_build/builds/docker_test.sh                       |  127
-rwxr-xr-x  tensorflow/tools/ci_build/builds/pip.sh                               |  286
-rwxr-xr-x  tensorflow/tools/ci_build/builds/print_build_info.sh                  |    2
-rwxr-xr-x  tensorflow/tools/ci_build/builds/test_installation.sh                 |  292
-rw-r--r--  tensorflow/tools/ci_build/builds/test_tutorials.sh                    |   46
-rwxr-xr-x  tensorflow/tools/ci_build/builds/with_the_same_user                   |    2
-rwxr-xr-x  tensorflow/tools/ci_build/ci_build.sh                                 |   30
-rwxr-xr-x  tensorflow/tools/ci_build/ci_parameterized_build.sh                   |   94
-rwxr-xr-x  tensorflow/tools/ci_build/install/install_bazel.sh                    |    2
-rwxr-xr-x  tensorflow/tools/ci_build/install/install_bootstrap_deb_packages.sh (renamed from tensorflow/tools/ci_build/install/install_openjdk8_from_ppa.sh)  |    6
-rwxr-xr-x  tensorflow/tools/ci_build/install/install_deb_packages.sh             |    5
-rwxr-xr-x  tensorflow/tools/ci_build/update_version.sh                           |  134
-rw-r--r--  tensorflow/tools/docker/Dockerfile                                    |   12
-rw-r--r--  tensorflow/tools/docker/Dockerfile.devel                              |    2
-rw-r--r--  tensorflow/tools/docker/Dockerfile.devel-gpu                          |    5
-rw-r--r--  tensorflow/tools/docker/Dockerfile.gpu                                |   14
-rw-r--r--  tensorflow/tools/docker/README.md                                     |    2
-rwxr-xr-x  tensorflow/tools/docker/docker_run_gpu.sh                             |    2
-rw-r--r--  tensorflow/tools/pip_package/setup.py                                 |   14
-rw-r--r--  tensorflow/tools/test/BUILD                                           |   40
-rw-r--r--  tensorflow/tools/test/__init__.py                                     |   20
-rw-r--r--  tensorflow/tools/test/gpu_info_lib.py                                 |  184
-rw-r--r--  tensorflow/tools/test/system_info.py                                  |   33
-rw-r--r--  tensorflow/tools/test/system_info_lib.py                              |  149
30 files changed, 1241 insertions(+), 303 deletions(-)
diff --git a/tensorflow/tools/ci_build/Dockerfile.android b/tensorflow/tools/ci_build/Dockerfile.android
index 0bffe80fcb..444ce17d98 100644
--- a/tensorflow/tools/ci_build/Dockerfile.android
+++ b/tensorflow/tools/ci_build/Dockerfile.android
@@ -3,11 +3,10 @@ FROM ubuntu:14.04
MAINTAINER Jan Prach <jendap@google.com>
# Copy and run the install scripts.
-COPY install/install_deb_packages.sh /install/install_deb_packages.sh
+COPY install/*.sh /install/
+RUN /install/install_bootstrap_deb_packages.sh
+RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-COPY install/install_openjdk8_from_ppa.sh /install/install_openjdk8_from_ppa.sh
-RUN /install/install_openjdk8_from_ppa.sh
-COPY install/install_bazel.sh /install/install_bazel.sh
RUN /install/install_bazel.sh
# Set up bazelrc.
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu b/tensorflow/tools/ci_build/Dockerfile.cpu
index 7bef5e07fe..acc84f136a 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu
@@ -3,11 +3,10 @@ FROM ubuntu:14.04
MAINTAINER Jan Prach <jendap@google.com>
# Copy and run the install scripts.
-COPY install/install_deb_packages.sh /install/install_deb_packages.sh
+COPY install/*.sh /install/
+RUN /install/install_bootstrap_deb_packages.sh
+RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-COPY install/install_openjdk8_from_ppa.sh /install/install_openjdk8_from_ppa.sh
-RUN /install/install_openjdk8_from_ppa.sh
-COPY install/install_bazel.sh /install/install_bazel.sh
RUN /install/install_bazel.sh
# Set up bazelrc.
diff --git a/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu b/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
new file mode 100644
index 0000000000..fc37a5bb28
--- /dev/null
+++ b/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
@@ -0,0 +1,14 @@
+FROM debian:jessie
+
+MAINTAINER Jan Prach <jendap@google.com>
+
+# Copy and run the install scripts.
+COPY install/*.sh /install/
+RUN /install/install_bootstrap_deb_packages.sh
+RUN echo "deb http://http.debian.net/debian jessie-backports main" | tee -a /etc/apt/sources.list
+RUN /install/install_deb_packages.sh
+RUN /install/install_bazel.sh
+
+# Set up bazelrc.
+COPY install/.bazelrc /root/.bazelrc
+ENV BAZELRC /root/.bazelrc
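The Debian Jessie image above is new; with the Dockerfile-driven container-type check that ci_build.sh gains later in this change, it should be selectable like the existing CPU/GPU images. A hypothetical invocation, assuming the image builds cleanly on the host, would be:

    $ tensorflow/tools/ci_build/ci_build.sh DEBIAN.JESSIE.CPU bazel test //tensorflow/...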
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index b57d1d18c1..b4b0ccccf7 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -1,13 +1,12 @@
-FROM nvidia/cuda:7.0-cudnn2-devel
+FROM nvidia/cuda:7.5-cudnn4-devel
MAINTAINER Jan Prach <jendap@google.com>
# Copy and run the install scripts.
-COPY install/install_deb_packages.sh /install/install_deb_packages.sh
+COPY install/*.sh /install/
+RUN /install/install_bootstrap_deb_packages.sh
+RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-COPY install/install_openjdk8_from_ppa.sh /install/install_openjdk8_from_ppa.sh
-RUN /install/install_openjdk8_from_ppa.sh
-COPY install/install_bazel.sh /install/install_bazel.sh
RUN /install/install_bazel.sh
# Set up bazelrc.
diff --git a/tensorflow/tools/ci_build/README.md b/tensorflow/tools/ci_build/README.md
index 90ede0b60c..aca5829b3c 100644
--- a/tensorflow/tools/ci_build/README.md
+++ b/tensorflow/tools/ci_build/README.md
@@ -73,7 +73,7 @@ tensorflow/tools/ci_build/ci_build.sh CPU bazel test //tensorflow/...
tensorflow/tools/ci_build/ci_build.sh GPU bazel build -c opt --config=cuda //tensorflow/...
# build pip with gpu support
-tensorflow/tools/ci_build/ci_build.sh GPU tensorflow/tools/ci_build/builds/gpu_pip.sh
+tensorflow/tools/ci_build/ci_build.sh GPU tensorflow/tools/ci_build/builds/pip.sh GPU
# build android example app
tensorflow/tools/ci_build/ci_build.sh ANDROID tensorflow/tools/ci_build/builds/android.sh
diff --git a/tensorflow/tools/ci_build/builds/configured b/tensorflow/tools/ci_build/builds/configured
index d452eac65e..297937e24e 100755
--- a/tensorflow/tools/ci_build/builds/configured
+++ b/tensorflow/tools/ci_build/builds/configured
@@ -32,7 +32,9 @@ else
export TF_NEED_CUDA=0
fi
+pushd "${CI_TENSORFLOW_SUBMODULE_PATH:-.}"
./configure
+popd
# Gather and print build information
SCRIPT_DIR=$( cd ${0%/*} && pwd -P )
diff --git a/tensorflow/tools/ci_build/builds/docker_test.sh b/tensorflow/tools/ci_build/builds/docker_test.sh
new file mode 100755
index 0000000000..7a1af79c89
--- /dev/null
+++ b/tensorflow/tools/ci_build/builds/docker_test.sh
@@ -0,0 +1,127 @@
+#!/usr/bin/env bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Build and test TensorFlow docker images.
+# The tests include Python unit tests-on-install and tutorial tests.
+#
+# Usage: docker_test.sh <IMAGE_TYPE> <TAG> <WHL_PATH>
+# Arguments:
+# IMAGE_TYPE : Type of the image: (CPU|GPU)
+# TAG : Docker image tag
+# WHL_PATH : Path to the whl file to be installed inside the docker image
+#
+# e.g.: docker_test.sh CPU someone/tensorflow:0.8.0 pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+#
+
+# Helper functions
+# Exit after a failure
+die() {
+ echo $@
+ exit 1
+}
+
+# Convert to lower case
+to_lower () {
+ echo "$1" | tr '[:upper:]' '[:lower:]'
+}
+
+
+# Helper function to traverse directories up until given file is found.
+function upsearch () {
+ test / == "$PWD" && return || \
+ test -e "$1" && echo "$PWD" && return || \
+ cd .. && upsearch "$1"
+}
+
+
+# Verify command line argument
+if [[ $# != "3" ]]; then
+ die "Usage: $(basename $0) <IMAGE_TYPE> <TAG> <WHL_PATH>"
+fi
+IMAGE_TYPE=$(to_lower "$1")
+DOCKER_IMG_TAG=$2
+WHL_PATH=$3
+
+# Verify image type
+if [[ "${IMAGE_TYPE}" == "cpu" ]]; then
+ DOCKERFILE="tensorflow/tools/docker/Dockerfile"
+elif [[ "${IMAGE_TYPE}" == "gpu" ]]; then
+ DOCKERFILE="tensorflow/tools/docker/Dockerfile.gpu"
+else
+ die "Unrecognized image type: $1"
+fi
+
+# Verify docker binary existence
+if [[ -z $(which docker) ]]; then
+ die "FAILED: docker binary unavailable"
+fi
+
+# Locate the base directory
+BASE_DIR=$(upsearch "${DOCKERFILE}")
+if [[ -z "${BASE_DIR}" ]]; then
+ die "FAILED: Unable to find the base directory where the dockerfile "\
+"${DOCKERFFILE} resides"
+fi
+echo "Base directory: ${BASE_DIR}"
+
+pushd ${BASE_DIR} > /dev/null
+
+# Build docker image
+DOCKERFILE_PATH="${BASE_DIR}/${DOCKERFILE}"
+DOCKERFILE_DIR="$(dirname ${DOCKERFILE_PATH})"
+
+# Check to make sure that the whl file exists
+test -f ${WHL_PATH} || \
+ die "whl file does not exist: ${WHL_PATH}"
+
+TMP_WHL_DIR="${DOCKERFILE_DIR}/whl"
+mkdir -p "${TMP_WHL_DIR}"
+cp "${WHL_PATH}" "${TMP_WHL_DIR}/" || \
+ die "FAILED to copy whl file from ${WHL_PATH} to ${TMP_WHL_DIR}/"
+
+docker build -t "${DOCKER_IMG_TAG}" -f "${DOCKERFILE_PATH}" \
+"${DOCKERFILE_DIR}" || \
+ die "FAILED to build docker image from Dockerfile ${DOCKERFILE_PATH}"
+
+# Clean up
+rm -rf "${TMP_WHL_DIR}" || \
+ die "Failed to remove temporary directory ${TMP_WHL_DIR}"
+
+
+# Add extra params for cuda devices and libraries for GPU container.
+if [ "${IMAGE_TYPE}" == "gpu" ]; then
+ devices=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
+ libs=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
+ GPU_EXTRA_PARAMS="${devices} ${libs}"
+else
+ GPU_EXTRA_PARAMS=""
+fi
+
+# Run docker image with source directory mapped
+docker run -v ${BASE_DIR}:/tensorflow-src -w /tensorflow-src \
+${GPU_EXTRA_PARAMS} \
+"${DOCKER_IMG_TAG}" \
+/bin/bash -c "tensorflow/tools/ci_build/builds/test_installation.sh && "\
+"tensorflow/tools/ci_build/builds/test_tutorials.sh"
+
+RESULT=$?
+
+popd > /dev/null
+if [[ ${RESULT} == 0 ]]; then
+ echo "SUCCESS: Built and tested docker image: ${DOCKER_IMG_TAG}"
+else
+ die "FAILED to build and test docker image: ${DOCKER_IMG_TAG}"
+fi
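For a GPU image, the script maps the host's /dev/nvidia* devices and libcuda libraries into the container before running the install and tutorial tests; a sketch of such a call (the image tag and whl path here are placeholders) might be:

    $ tensorflow/tools/ci_build/builds/docker_test.sh GPU someone/tensorflow:0.8.0-gpu \
        pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl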
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index 66ebf13baa..16364fbf9e 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -13,55 +13,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
-# Build the Python PIP installation package for TensorFlow
-# and run the Python unit tests from the source code on the installation
+#
+# Build the Python PIP installation package for TensorFlow and install
+# the package.
+# The PIP installation is done using the --user flag.
#
# Usage:
-# pip.sh CONTAINER_TYPE
+# pip.sh CONTAINER_TYPE [--test_tutorials]
#
# When executing the Python unit tests, the script obeys the shell
-# variables: PY_TEST_WHITELIST, PY_TEST_BLACKLIST, PY_TEST_GPU_BLACKLIST,
-# and NO_TEST_ON_INSTALL
-#
-# To select only a subset of the Python tests to run, set the environment
-# variable PY_TEST_WHITELIST, e.g.,
-# PY_TEST_WHITELIST="tensorflow/python/kernel_tests/shape_ops_test.py"
-# Separate the tests with a colon (:). Leave this environment variable empty
-# to disable the whitelist.
+# variables: TF_BUILD_BAZEL_CLEAN, NO_TEST_ON_INSTALL
#
-# You can also ignore a set of the tests by using the environment variable
-# PY_TEST_BLACKLIST. For example, you can include in PY_TEST_BLACKLIST the
-# tests that depend on Python modules in TensorFlow source that are not
-# exported publicly.
+# TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
+# script to perform bazel clean prior to main build and test steps.
#
-# In addition, you can put blacklist for only GPU build inthe environment
-# variable PY_TEST_GPU_BLACKLIST.
-#
-# If the environmental variable NO_TEST_ON_INSTALL is set to any non-empty
-# value, the script will exit after the pip install step.
-
-# =============================================================================
-# Test blacklist: General
+# If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install
+# part will be skipped.
#
-# tensorflow/python/framework/ops_test.py
-# depends on depends on "test_ops", which is defined in a C++ file wrapped as
-# a .py file through the Bazel rule “tf_gen_ops_wrapper_py”.
-# tensorflow/util/protobuf/compare_test.py:
-# depends on compare_test_pb2 defined outside Python
-# tensorflow/python/framework/device_test.py:
-# depends on CheckValid() and ToString(), both defined externally
+# If the --test_tutorials flag is set, it will cause the script to run the
+# tutorial tests (see test_tutorials.sh) after the PIP
+# installation and the Python unit tests-on-install step.
#
-PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:"\
-"tensorflow/python/framework/ops_test.py:"\
-"tensorflow/python/util/protobuf/compare_test.py:"\
-"tensorflow/python/framework/device_test.py"
-
-# Test blacklist: GPU-only
-PY_TEST_GPU_BLACKLIST="${PY_TEST_GPU_BLACKLIST}:"\
-"tensorflow/python/framework/function_test.py"
-
-# =============================================================================
# Helper functions
# Get the absolute path from a path
@@ -69,15 +41,30 @@ abs_path() {
[[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
}
+
# Exit after a failure
die() {
echo $@
exit 1
}
+
# Get the command line arguments
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
+if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] && \
+ [[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then
+ echo "TF_BUILD_BAZEL_CLEAN=${TF_BUILD_BAZEL_CLEAN}: Performing 'bazel clean'"
+ bazel clean
+fi
+
+DO_TEST_TUTORIALS=0
+for ARG in $@; do
+ if [[ "${ARG}" == "--test_tutorials" ]]; then
+ DO_TEST_TUTORIALS=1
+ fi
+done
+
PIP_BUILD_TARGET="//tensorflow/tools/pip_package:build_pip_package"
if [[ ${CONTAINER_TYPE} == "cpu" ]]; then
bazel build -c opt ${PIP_BUILD_TARGET} || die "Build failed."
@@ -96,6 +83,12 @@ if [[ ${CONTAINER_TYPE} == "gpu" ]]; then
PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:${PY_TEST_GPU_BLACKLIST}"
fi
+# If still in a virtualenv, deactivate it first
+if [[ ! -z "$(which deactivate)" ]]; then
+ echo "It appears that we are already in a virtualenv. Deactivating..."
+ deactivate || die "FAILED: Unable to deactivate from existing virtualenv"
+fi
+
# Obtain the path to Python binary
source tools/python_bin_path.sh
@@ -109,18 +102,20 @@ fi
# installation of Python
PY_MAJOR_MINOR_VER=$(${PYTHON_BIN_PATH} -V 2>&1 | awk '{print $NF}' | cut -d. -f-2)
-echo "Python binary path to be used in PIP install-test: ${PYTHON_BIN_PATH} "\
+echo "Python binary path to be used in PIP install: ${PYTHON_BIN_PATH} "\
"(Major.Minor version: ${PY_MAJOR_MINOR_VER})"
# Build PIP Wheel file
-PIP_WHL_DIR="pip_test/whl"
-PIP_WHL_DIR=`abs_path ${PIP_WHL_DIR}` # Get absolute path
+PIP_TEST_ROOT="pip_test"
+PIP_WHL_DIR="${PIP_TEST_ROOT}/whl"
+PIP_WHL_DIR=$(abs_path ${PIP_WHL_DIR}) # Get absolute path
rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR}
-bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} &&
+bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} || \
+die "build_pip_package FAILED"
# Perform installation
-WHL_PATH=`ls ${PIP_WHL_DIR}/tensorflow*.whl`
-if [[ `echo ${WHL_PATH} | wc -w` -ne 1 ]]; then
+WHL_PATH=$(ls ${PIP_WHL_DIR}/tensorflow*.whl)
+if [[ $(echo ${WHL_PATH} | wc -w) -ne 1 ]]; then
die "ERROR: Failed to find exactly one built TensorFlow .whl file in "\
"directory: ${PIP_WHL_DIR}"
fi
@@ -130,180 +125,47 @@ echo "whl file path = ${WHL_PATH}"
# Install, in user's local home folder
echo "Installing pip whl file: ${WHL_PATH}"
-# Call pip install twice, first time with --upgrade and second time without it
-# This addresses the sporadic test failures related to protobuf version
-${PYTHON_BIN_PATH} -m pip install -v --user --upgrade ${WHL_PATH} numpy==1.8.2 &&
-${PYTHON_BIN_PATH} -m pip install -v --user ${WHL_PATH} &&
+# Create temporary directory for install test
+VENV_DIR="${PIP_TEST_ROOT}/venv"
+rm -rf "${VENV_DIR}" && mkdir -p "${VENV_DIR}"
+echo "Create directory for virtualenv: ${VENV_DIR}"
+
+# Verify that virtualenv exists
+if [[ -z $(which virtualenv) ]]; then
+ die "FAILED: virtualenv not available on path"
+fi
+
+virtualenv -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" ||
+die "FAILED: Unable to create virtualenv"
+
+source "${VENV_DIR}/bin/activate" ||
+die "FAILED: Unable to activate virtualenv"
+
+# Install the pip file in virtual env
+pip install -v ${WHL_PATH} \
+&& echo "Successfully installed pip package ${WHL_PATH}" \
+|| die "pip install (without --upgrade) FAILED"
# If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python
# tests-on-install and exit right away
-if [[ ! -z ${NO_TEST_ON_INSTALL} ]]; then
+if [[ ! -z "${NO_TEST_ON_INSTALL}" ]] &&
+ [[ "${NO_TEST_ON_INSTALL}" != "0" ]]; then
echo "NO_TEST_ON_INSTALL=${NO_TEST_ON_INSTALL}:"
echo " Skipping ALL Python unit tests on install"
exit 0
fi
-# Directory from which the unit-test files will be run
-PY_TEST_DIR_REL="pip_test/tests"
-PY_TEST_DIR=`abs_path ${PY_TEST_DIR_REL}` # Get absolute path
-rm -rf ${PY_TEST_DIR} && mkdir -p ${PY_TEST_DIR}
-
-# Create test log directory
-PY_TEST_LOG_DIR_REL=${PY_TEST_DIR_REL}/logs
-PY_TEST_LOG_DIR=`abs_path ${PY_TEST_LOG_DIR_REL}` # Absolute path
-
-mkdir ${PY_TEST_LOG_DIR}
-
-# Copy source files that are required by the tests but are not included in the
-# PIP package
-
-# Look for local Python library directory
-LIB_PYTHON_DIR=""
+# Call test_installation.sh to perform test-on-install
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-# Candidate locations of the local Python library directory
-LIB_PYTHON_DIR_CANDS="${HOME}/.local/lib/python${PY_MAJOR_MINOR_VER}* "\
-"${HOME}/Library/Python/${PY_MAJOR_MINOR_VER}*/lib/python"
+"${DIR}/test_installation.sh" --virtualenv ||
+die "PIP tests-on-install FAILED"
-for CAND in ${LIB_PYTHON_DIR_CANDS}; do
- if [[ -d "${CAND}" ]]; then
- LIB_PYTHON_DIR="${CAND}"
- break
- fi
-done
-
-if [[ -z ${LIB_PYTHON_DIR} ]]; then
- die "Failed to find local Python library directory"
-else
- echo "Found local Python library directory at: ${LIB_PYTHON_DIR}"
+# Optional: Run the tutorial tests
+if [[ "${DO_TEST_TUTORIALS}" == "1" ]]; then
+ "${DIR}/test_tutorials.sh" --virtualenv ||
+die "PIP tutorial tests-on-install FAILED"
fi
-PACKAGES_DIR=`ls -d ${LIB_PYTHON_DIR}/*-packages | head -1`
-
-echo "Copying some source directories that are required by tests but are "\
-"not included in install to Python packages directory: ${PACKAGES_DIR}"
-
-# tensorflow.python.tools
-rm -rf ${PACKAGES_DIR}/tensorflow/python/tools
-cp -r tensorflow/python/tools \
- ${PACKAGES_DIR}/tensorflow/python/tools
-touch ${PACKAGES_DIR}/tensorflow/python/tools/__init__.py # Make module visible
-
-echo "Copying additional files required by tests to working directory "\
-"for test: ${PY_TEST_DIR}"
-
-# Image files required by some tests, e.g., images_ops_test.py
-mkdir -p ${PY_TEST_DIR}/tensorflow/core/lib
-rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/jpeg
-cp -r tensorflow/core/lib/jpeg ${PY_TEST_DIR}/tensorflow/core/lib
-rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/png
-cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib
-
-# Run tests
-DIR0=`pwd`
-ALL_PY_TESTS=`find tensorflow/python -name "*_test.py"`
-# TODO(cais): Add tests in tensorflow/contrib
-
-PY_TEST_COUNT=`echo ${ALL_PY_TESTS} | wc -w`
-
-if [[ ${PY_TEST_COUNT} -eq 0 ]]; then
- die "ERROR: Cannot find any tensorflow Python unit tests to run on install"
-fi
-
-# Iterate through all the Python unit test files using the installation
-COUNTER=0
-PASS_COUNTER=0
-FAIL_COUNTER=0
-SKIP_COUNTER=0
-FAILED_TESTS=""
-FAILED_TEST_LOGS=""
-
-for TEST_FILE_PATH in ${ALL_PY_TESTS}; do
- ((COUNTER++))
-
- PROG_STR="(${COUNTER} / ${PY_TEST_COUNT})"
-
- # If PY_TEST_WHITELIST is not empty, only the white-listed tests will be run
- if [[ ! -z ${PY_TEST_WHITELIST} ]] && \
- [[ ! ${PY_TEST_WHITELIST} == *"${TEST_FILE_PATH}"* ]]; then
- ((SKIP_COUNTER++))
- echo "${PROG_STR} Non-whitelisted test SKIPPED: ${TEST_FILE_PATH}"
- continue
- fi
-
- # If the test is in the black list, skip it
- if [[ ${PY_TEST_BLACKLIST} == *"${TEST_FILE_PATH}"* ]]; then
- ((SKIP_COUNTER++))
- echo "${PROG_STR} Blacklisted test SKIPPED: ${TEST_FILE_PATH}"
- continue
- fi
-
- # Copy to a separate directory to guard against the possibility of picking up
- # modules in the source directory
- cp ${TEST_FILE_PATH} ${PY_TEST_DIR}/
-
- TEST_BASENAME=`basename "${TEST_FILE_PATH}"`
-
- # Relative path of the test log. Use long path in case there are duplicate
- # file names in the Python tests
- TEST_LOG_REL="${PY_TEST_LOG_DIR_REL}/${TEST_FILE_PATH}.log"
- mkdir -p `dirname ${TEST_LOG_REL}` # Create directory for log
-
- TEST_LOG=`abs_path ${TEST_LOG_REL}` # Absolute path
-
- # Before running the test, cd away from the Tensorflow source to
- # avoid the possibility of picking up dependencies from the
- # source directory
- cd ${PY_TEST_DIR}
- ${PYTHON_BIN_PATH} ${PY_TEST_DIR}/${TEST_BASENAME} >${TEST_LOG} 2>&1
-
- # Check for pass or failure status of the test outtput and exit
- if [[ $? -eq 0 ]]; then
- ((PASS_COUNTER++))
-
- echo "${PROG_STR} Python test-on-install PASSED: ${TEST_FILE_PATH}"
- else
- ((FAIL_COUNTER++))
-
- FAILED_TESTS="${FAILED_TESTS} ${TEST_FILE_PATH}"
-
- FAILED_TEST_LOGS="${FAILED_TEST_LOGS} ${TEST_LOG_REL}"
-
- echo "${PROG_STR} Python test-on-install FAILED: ${TEST_FILE_PATH}"
- echo " Log @: ${TEST_LOG_REL}"
- echo "============== BEGINS failure log content =============="
- cat ${TEST_LOG}
- echo "============== ENDS failure log content =============="
- echo ""
- fi
- cd ${DIR0}
-
- # Clean up files for this test
- rm -f ${PY_TEST_DIR}/${TEST_BASENAME}
-
-done
-
-echo ""
-echo "${PY_TEST_COUNT} Python test(s):" \
- "${PASS_COUNTER} passed;" \
- "${FAIL_COUNTER} failed; " \
- "${SKIP_COUNTER} skipped"
-echo "Test logs directory: ${PY_TEST_LOG_DIR_REL}"
-
-if [[ ${FAIL_COUNTER} -eq 0 ]]; then
- echo ""
- echo "Python test-on-install SUCCEEDED"
-
- exit 0
-else
- echo "FAILED test(s):"
- FAILED_TEST_LOGS=($FAILED_TEST_LOGS)
- FAIL_COUNTER=0
- for TEST_NAME in ${FAILED_TESTS}; do
- echo " ${TEST_NAME} (Log @: ${FAILED_TEST_LOGS[${FAIL_COUNTER}]})"
- ((FAIL_COUNTER++))
- done
-
- echo ""
- echo "Python test-on-install FAILED"
- exit 1
-fi
+deactivate ||
+die "FAILED: Unable to deactivate virtualenv"
diff --git a/tensorflow/tools/ci_build/builds/print_build_info.sh b/tensorflow/tools/ci_build/builds/print_build_info.sh
index f243c185c0..95b5eb8b83 100755
--- a/tensorflow/tools/ci_build/builds/print_build_info.sh
+++ b/tensorflow/tools/ci_build/builds/print_build_info.sh
@@ -63,7 +63,7 @@ if [[ ! -z $(which swig) ]]; then
fi
# Information about TensorFlow source
-TF_FETCH_URL=$(git remote show origin | grep "Fetch URL:" | awk '{print $3}')
+TF_FETCH_URL=$(git config --get remote.origin.url)
TF_HEAD=$(git rev-parse HEAD)
# NVIDIA & CUDA info
diff --git a/tensorflow/tools/ci_build/builds/test_installation.sh b/tensorflow/tools/ci_build/builds/test_installation.sh
new file mode 100755
index 0000000000..d2c8d21c5b
--- /dev/null
+++ b/tensorflow/tools/ci_build/builds/test_installation.sh
@@ -0,0 +1,292 @@
+#!/usr/bin/env bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Build the Python PIP installation package for TensorFlow
+# and run the Python unit tests from the source code on the installation
+#
+# Usage:
+# test_installation.sh [--virtualenv]
+#
+# If the flag --virtualenv is set, the script will use "python" as the Python
+# binary path. Otherwise, it will use tools/python_bin_path.sh to determine
+# the Python binary path.
+#
+# When executing the Python unit tests, the script obeys the shell
+# variables: PY_TEST_WHITELIST, PY_TEST_BLACKLIST, PY_TEST_GPU_BLACKLIST,
+#
+# To select only a subset of the Python tests to run, set the environment
+# variable PY_TEST_WHITELIST, e.g.,
+# PY_TEST_WHITELIST="tensorflow/python/kernel_tests/shape_ops_test.py"
+# Separate the tests with a colon (:). Leave this environment variable empty
+# to disable the whitelist.
+#
+# You can also ignore a set of the tests by using the environment variable
+# PY_TEST_BLACKLIST. For example, you can include in PY_TEST_BLACKLIST the
+# tests that depend on Python modules in TensorFlow source that are not
+# exported publicly.
+#
+# In addition, you can put a blacklist for only the GPU build in the environment
+# variable PY_TEST_GPU_BLACKLIST.
+#
+# TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
+# script to perform bazel clean prior to main build and test steps.
+#
+# If the environmental variable NO_TEST_ON_INSTALL is set to any non-empty
+# value, the script will exit after the pip install step.
+
+# =============================================================================
+# Test blacklist: General
+#
+# tensorflow/python/framework/ops_test.py
+# depends on "test_ops", which is defined in a C++ file wrapped as
+# a .py file through the Bazel rule "tf_gen_ops_wrapper_py".
+# tensorflow/util/protobuf/compare_test.py:
+# depends on compare_test_pb2 defined outside Python
+# tensorflow/python/framework/device_test.py:
+# depends on CheckValid() and ToString(), both defined externally
+#
+PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:"\
+"tensorflow/python/framework/ops_test.py:"\
+"tensorflow/python/util/protobuf/compare_test.py:"\
+"tensorflow/python/framework/device_test.py"
+
+# Test blacklist: GPU-only
+PY_TEST_GPU_BLACKLIST="${PY_TEST_GPU_BLACKLIST}:"\
+"tensorflow/python/framework/function_test.py"
+
+# =============================================================================
+
+
+# Helper functions
+# Get the absolute path from a path
+abs_path() {
+ [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
+}
+
+
+die() {
+ echo $@
+ exit 1
+}
+
+
+# Obtain the path to Python binary
+# source tools/python_bin_path.sh
+if [[ "$1" == "--virtualenv" ]]; then
+ PYTHON_BIN_PATH="$(which python)"
+else
+ source tools/python_bin_path.sh
+ # Assume: PYTHON_BIN_PATH is exported by the script above
+fi
+
+if [[ -z "${PYTHON_BIN_PATH}" ]]; then
+ die "PYTHON_BIN_PATH was not provided. If this is not virtualenv, "\
+"did you run configure?"
+fi
+
+# Determine the major and minor versions of Python being used (e.g., 2.7)
+# This info will be useful for determining the directory of the local pip
+# installation of Python
+PY_MAJOR_MINOR_VER=$(${PYTHON_BIN_PATH} -V 2>&1 | awk '{print $NF}' | cut -d. -f-2)
+
+echo "Python binary path to be used in PIP install-test: ${PYTHON_BIN_PATH} "\
+"(Major.Minor version: ${PY_MAJOR_MINOR_VER})"
+
+# Avoid permission issues outside container
+umask 000
+
+# Directory from which the unit-test files will be run
+PY_TEST_DIR_REL="pip_test/tests"
+PY_TEST_DIR=$(abs_path ${PY_TEST_DIR_REL}) # Get absolute path
+rm -rf ${PY_TEST_DIR} && mkdir -p ${PY_TEST_DIR}
+
+# Create test log directory
+PY_TEST_LOG_DIR_REL=${PY_TEST_DIR_REL}/logs
+PY_TEST_LOG_DIR=$(abs_path ${PY_TEST_LOG_DIR_REL}) # Absolute path
+
+mkdir ${PY_TEST_LOG_DIR}
+
+
+# Copy source files that are required by the tests but are not included in the
+# PIP package
+
+# Look for local Python library directory
+# pushd/popd avoids importing TensorFlow from the source directory.
+pushd /tmp > /dev/null
+TF_INSTALL_PATH=$(dirname \
+ $("${PYTHON_BIN_PATH}" -c "import tensorflow as tf; print(tf.__file__)"))
+popd > /dev/null
+
+if [[ -z ${TF_INSTALL_PATH} ]]; then
+ die "Failed to find path where TensorFlow is installed."
+else
+ echo "Found TensorFlow install path: ${TF_INSTALL_PATH}"
+fi
+
+echo "Copying some source directories required by Python unit tests but "\
+"not included in install to TensorFlow install path: ${TF_INSTALL_PATH}"
+
+# Files for tensorflow.python.tools
+rm -rf ${TF_INSTALL_PATH}/python/tools
+cp -r tensorflow/python/tools \
+ ${TF_INSTALL_PATH}/python/tools
+touch ${TF_INSTALL_PATH}/python/tools/__init__.py # Make module visible
+
+# Files for tensorflow.examples
+rm -rf ${TF_INSTALL_PATH}/examples/image_retraining
+mkdir -p ${TF_INSTALL_PATH}/examples/image_retraining
+cp -r tensorflow/examples/image_retraining/retrain.py \
+ ${TF_INSTALL_PATH}/examples/image_retraining/retrain.py
+touch ${TF_INSTALL_PATH}/examples/__init__.py
+touch ${TF_INSTALL_PATH}/examples/image_retraining/__init__.py
+
+echo "Copying additional files required by tests to working directory "\
+"for test: ${PY_TEST_DIR}"
+
+# Image files required by some tests, e.g., images_ops_test.py
+
+mkdir -p ${PY_TEST_DIR}/tensorflow/core/lib
+rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/jpeg
+cp -r tensorflow/core/lib/jpeg ${PY_TEST_DIR}/tensorflow/core/lib
+rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/png
+cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib
+
+# Run tests
+DIR0=$(pwd)
+ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} -name "*_test.py" | sort)
+# TODO(cais): Add tests in tensorflow/contrib
+
+PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w)
+
+if [[ ${PY_TEST_COUNT} -eq 0 ]]; then
+ die "ERROR: Cannot find any tensorflow Python unit tests to run on install"
+fi
+
+# Iterate through all the Python unit test files using the installation
+COUNTER=0
+PASS_COUNTER=0
+FAIL_COUNTER=0
+SKIP_COUNTER=0
+FAILED_TESTS=""
+FAILED_TEST_LOGS=""
+
+for TEST_FILE_PATH in ${ALL_PY_TESTS}; do
+ ((COUNTER++))
+
+ PROG_STR="(${COUNTER} / ${PY_TEST_COUNT})"
+
+ # If PY_TEST_WHITELIST is not empty, only the white-listed tests will be run
+ if [[ ! -z ${PY_TEST_WHITELIST} ]] && \
+ [[ ! ${PY_TEST_WHITELIST} == *"${TEST_FILE_PATH}"* ]]; then
+ ((SKIP_COUNTER++))
+ echo "${PROG_STR} Non-whitelisted test SKIPPED: ${TEST_FILE_PATH}"
+ continue
+ fi
+
+ # If the test is in the black list, skip it
+ if [[ ${PY_TEST_BLACKLIST} == *"${TEST_FILE_PATH}"* ]]; then
+ ((SKIP_COUNTER++))
+ echo "${PROG_STR} Blacklisted test SKIPPED: ${TEST_FILE_PATH}"
+ continue
+ fi
+
+ # Copy to a separate directory to guard against the possibility of picking up
+ # modules in the source directory
+ cp ${TEST_FILE_PATH} ${PY_TEST_DIR}/
+
+ TEST_BASENAME=$(basename "${TEST_FILE_PATH}")
+
+ # Relative path of the test log. Use long path in case there are duplicate
+ # file names in the Python tests
+ TEST_LOG_REL="${PY_TEST_LOG_DIR_REL}/${TEST_FILE_PATH}.log"
+ mkdir -p $(dirname ${TEST_LOG_REL}) # Create directory for log
+
+ TEST_LOG=$(abs_path ${TEST_LOG_REL}) # Absolute path
+
+ # Start the stopwatch for this test
+ START_TIME=$(date +'%s')
+
+ # Before running the test, cd away from the Tensorflow source to
+ # avoid the possibility of picking up dependencies from the
+ # source directory
+ cd ${PY_TEST_DIR}
+ ${PYTHON_BIN_PATH} ${PY_TEST_DIR}/${TEST_BASENAME} >${TEST_LOG} 2>&1
+
+ TEST_RESULT=$?
+
+ END_TIME=$(date +'%s')
+ ELAPSED_TIME="$((${END_TIME} - ${START_TIME})) s"
+
+ # Check for pass or failure status of the test output and exit
+ if [[ ${TEST_RESULT} -eq 0 ]]; then
+ ((PASS_COUNTER++))
+
+ echo "${PROG_STR} Python test-on-install PASSED (${ELAPSED_TIME}): "\
+"${TEST_FILE_PATH}"
+ else
+ ((FAIL_COUNTER++))
+
+ FAILED_TESTS="${FAILED_TESTS} ${TEST_FILE_PATH}"
+
+ FAILED_TEST_LOGS="${FAILED_TEST_LOGS} ${TEST_LOG_REL}"
+
+ echo "${PROG_STR} Python test-on-install FAILED (${ELPASED_TIME}): "\
+"${TEST_FILE_PATH}"
+
+ echo " Log @: ${TEST_LOG_REL}"
+ echo "============== BEGINS failure log content =============="
+ cat ${TEST_LOG}
+ echo "============== ENDS failure log content =============="
+ echo ""
+ fi
+ cd ${DIR0}
+
+ # Clean up files for this test
+ rm -f ${PY_TEST_DIR}/${TEST_BASENAME}
+
+done
+
+# Clean up files copied for Python unit tests:
+rm -rf ${TF_INSTALL_PATH}/python/tools
+rm -rf ${TF_INSTALL_PATH}/examples/image_retraining
+rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/jpeg
+rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/png
+
+echo ""
+echo "${PY_TEST_COUNT} Python test(s):" \
+ "${PASS_COUNTER} passed;" \
+ "${FAIL_COUNTER} failed; " \
+ "${SKIP_COUNTER} skipped"
+echo "Test logs directory: ${PY_TEST_LOG_DIR_REL}"
+
+if [[ ${FAIL_COUNTER} -eq 0 ]]; then
+ echo ""
+ echo "Python test-on-install SUCCEEDED"
+
+ exit 0
+else
+ echo "FAILED test(s):"
+ FAILED_TEST_LOGS=($FAILED_TEST_LOGS)
+ FAIL_COUNTER=0
+ for TEST_NAME in ${FAILED_TESTS}; do
+ echo " ${TEST_NAME} (Log @: ${FAILED_TEST_LOGS[${FAIL_COUNTER}]})"
+ ((FAIL_COUNTER++))
+ done
+
+ echo ""
+ echo "Python test-on-install FAILED"
+ exit 1
+fi
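Because test_installation.sh still honors PY_TEST_WHITELIST, a single installed-package test can be exercised in isolation; a hypothetical run from the source root inside an activated virtualenv might look like:

    $ PY_TEST_WHITELIST="tensorflow/python/kernel_tests/shape_ops_test.py" \
        tensorflow/tools/ci_build/builds/test_installation.sh --virtualenv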
diff --git a/tensorflow/tools/ci_build/builds/test_tutorials.sh b/tensorflow/tools/ci_build/builds/test_tutorials.sh
index 13c26fd61f..bb65460186 100644
--- a/tensorflow/tools/ci_build/builds/test_tutorials.sh
+++ b/tensorflow/tools/ci_build/builds/test_tutorials.sh
@@ -21,7 +21,11 @@
# decrement of loss with training, and verifying the existence of saved
# checkpoints and summaries files.
#
-# Usage: test_tutorials.sh
+# Usage: test_tutorials.sh [--virtualenv]
+#
+# If the flag --virtualenv is set, the script will use "python" as the Python
+# binary path. Otherwise, it will use tools/python_bin_path.sh to determine
+# the Python binary path.
#
# This script obeys the following environment variables (if exists):
# TUT_TESTS_BLACKLIST: Force skipping of specified tutorial tests listed
@@ -104,42 +108,48 @@ if [[ -z "$(which ${TIMEOUT_BIN})" ]]; then
fi
echo "Binary path for timeout: \"$(which ${TIMEOUT_BIN})\""
+# Avoid permission issues outside Docker containers
+umask 000
+
mkdir -p "${LOGS_DIR}" || die "Failed to create logs directory"
mkdir -p "${TUT_TEST_ROOT}" || die "Failed to create test directory"
-source tools/python_bin_path.sh
-
-if [[ -z "$PYTHON_BIN_PATH" ]]; then
- die "PYTHON_BIN_PATH was not provided. Did you run configure?"
+if [[ "$1" == "--virtualenv" ]]; then
+ PYTHON_BIN_PATH="$(which python)"
+else
+ source tools/python_bin_path.sh
fi
-echo "Binary path for python: \"$PYTHON_BIN_PATH\""
+if [[ -z "${PYTHON_BIN_PATH}" ]]; then
+ die "PYTHON_BIN_PATH was not provided. If this is not virtualenv, "\
+"did you run configure?"
+else
+ echo "Binary path for python: \"$PYTHON_BIN_PATH\""
+fi
# Determine the TensorFlow installation path
+# pushd/popd avoids importing TensorFlow from the source directory.
pushd /tmp > /dev/null
-TF_INSTALL_PATH=$(dirname $(${PYTHON_BIN_PATH} -c "import tensorflow; print(tensorflow.__file__)"))
+TF_INSTALL_PATH=$(dirname \
+ $("${PYTHON_BIN_PATH}" -c "import tensorflow as tf; print(tf.__file__)"))
popd > /dev/null
echo "Detected TensorFlow installation path: ${TF_INSTALL_PATH}"
TEST_DIR="pip_test/tutorials"
-mkdir -p "${TEST_DIR}" ||
-die "Failed to create test directory: ${TEST_DIR}"
+mkdir -p "${TEST_DIR}" || \
+ die "Failed to create test directory: ${TEST_DIR}"
# Copy folders required by mnist tutorials
-if [[ ! -d "${TF_INSTALL_PATH}/examples/tutorials/mnist" ]]; then
- echo "Copying files required by MNIST tutorials..."
-
- mkdir -p "${TF_INSTALL_PATH}/examples/tutorials"
- cp tensorflow/examples/tutorials/__init__.py \
+mkdir -p "${TF_INSTALL_PATH}/examples/tutorials"
+cp tensorflow/examples/tutorials/__init__.py \
"${TF_INSTALL_PATH}/examples/tutorials/"
- cp -r tensorflow/examples/tutorials/mnist \
+cp -r tensorflow/examples/tutorials/mnist \
"${TF_INSTALL_PATH}/examples/tutorials/"
- if [[ ! -d "${TF_INSTALL_PATH}/examples/tutorials/mnist" ]]; then
- die "FAILED: Unable to copy directory required by MNIST tutorials: "\
+if [[ ! -d "${TF_INSTALL_PATH}/examples/tutorials/mnist" ]]; then
+ die "FAILED: Unable to copy directory required by MNIST tutorials: "\
"${TF_INSTALL_PATH}/examples/tutorials/mnist"
- fi
fi
# -----------------------------------------------------------
diff --git a/tensorflow/tools/ci_build/builds/with_the_same_user b/tensorflow/tools/ci_build/builds/with_the_same_user
index bab6f14c10..43773f23ba 100755
--- a/tensorflow/tools/ci_build/builds/with_the_same_user
+++ b/tensorflow/tools/ci_build/builds/with_the_same_user
@@ -17,7 +17,7 @@
# This script is a wrapper creating the same user inside container as the one
# running the ci_build.sh outside the container. It also set the home directory
# for the user inside container to match the same absolute path as the workspace
-# outside of continer.
+# outside of container.
# We do this so that the bazel running inside container generate symbolic links
# and user permissions which makes sense outside of container.
# Do not run this manually. It does not make sense. It is intended to be called
diff --git a/tensorflow/tools/ci_build/ci_build.sh b/tensorflow/tools/ci_build/ci_build.sh
index 9525017793..24c14f2197 100755
--- a/tensorflow/tools/ci_build/ci_build.sh
+++ b/tensorflow/tools/ci_build/ci_build.sh
@@ -20,10 +20,15 @@ CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
shift 1
COMMAND=("$@")
+# Figure out the directory where this script is.
+SCRIPT_DIR=$( cd ${0%/*} && pwd -P )
+
# Validate command line arguments.
-if [ "$#" -lt 1 ] || [[ ! "${CONTAINER_TYPE}" =~ ^(cpu|gpu|android)$ ]]; then
+if [ "$#" -lt 1 ] || [ ! -e "${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" ]; then
+ supported_container_types=$( ls -1 ${SCRIPT_DIR}/Dockerfile.* | \
+ sed -n 's/.*Dockerfile\.\([^\/]*\)/\1/p' | tr '\n' ' ' )
>&2 echo "Usage: $(basename $0) CONTAINER_TYPE COMMAND"
- >&2 echo " CONTAINER_TYPE can be 'CPU' or 'GPU'"
+ >&2 echo " CONTAINER_TYPE can be one of [ ${supported_container_types}]"
>&2 echo " COMMAND is a command (with arguments) to run inside"
>&2 echo " the container."
>&2 echo ""
@@ -38,12 +43,10 @@ fi
if [[ "${CI_DOCKER_EXTRA_PARAMS}" != *"--rm"* ]]; then
CI_DOCKER_EXTRA_PARAMS="--rm ${CI_DOCKER_EXTRA_PARAMS}"
fi
-CI_COMMAND_PREFIX=("${CI_COMMAND_PREFIX[@]:-tensorflow/tools/ci_build/builds/with_the_same_user tensorflow/tools/ci_build/builds/configured ${CONTAINER_TYPE}}")
+CI_TENSORFLOW_SUBMODULE_PATH="${CI_TENSORFLOW_SUBMODULE_PATH:-.}"
+CI_COMMAND_PREFIX=("${CI_COMMAND_PREFIX[@]:-${CI_TENSORFLOW_SUBMODULE_PATH}/tensorflow/tools/ci_build/builds/with_the_same_user ${CI_TENSORFLOW_SUBMODULE_PATH}/tensorflow/tools/ci_build/builds/configured ${CONTAINER_TYPE}}")
-# Figure out the directory where this script is.
-SCRIPT_DIR=$( cd ${0%/*} && pwd -P )
-
# Helper function to traverse directories up until given file is found.
function upsearch () {
test / == "$PWD" && return || \
@@ -60,7 +63,7 @@ BUILD_TAG="${BUILD_TAG:-tf_ci}"
# Add extra params for cuda devices and libraries for GPU container.
if [ "${CONTAINER_TYPE}" == "gpu" ]; then
devices=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
- libs=$(\ls /usr/lib/x86_64-linux-gnu/libcuda* | xargs -I{} echo '-v {}:{}')
+ libs=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
GPU_EXTRA_PARAMS="${devices} ${libs}"
else
GPU_EXTRA_PARAMS=""
@@ -98,12 +101,13 @@ mkdir -p ${WORKSPACE}/bazel-ci_build-cache
docker run \
-v ${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache \
-e "CI_BUILD_HOME=${WORKSPACE}/bazel-ci_build-cache" \
- -e "CI_BUILD_USER=${USER}" \
- -e "CI_BUILD_UID=$(id -u $USER)" \
- -e "CI_BUILD_GROUP=$(id -g --name $USER)" \
- -e "CI_BUILD_GID=$(id -g $USER)" \
- -v ${WORKSPACE}:/tensorflow \
- -w /tensorflow \
+ -e "CI_BUILD_USER=$(id -u --name)" \
+ -e "CI_BUILD_UID=$(id -u)" \
+ -e "CI_BUILD_GROUP=$(id -g --name)" \
+ -e "CI_BUILD_GID=$(id -g)" \
+ -e "CI_TENSORFLOW_SUBMODULE_PATH=${CI_TENSORFLOW_SUBMODULE_PATH}" \
+ -v ${WORKSPACE}:/workspace \
+ -w /workspace \
${GPU_EXTRA_PARAMS} \
${CI_DOCKER_EXTRA_PARAMS[@]} \
"${DOCKER_IMG_NAME}" \
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 97b25f32a0..46c1740af6 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -21,7 +21,7 @@
# TF_BUILD_CONTAINER_TYPE: (CPU | GPU | ANDROID)
# TF_BUILD_PYTHON_VERSION: (PYTHON2 | PYTHON3)
# TF_BUILD_IS_OPT: (NO_OPT | OPT)
-# TF_BUILD_IS_PIP: (NO_PIP | PIP)
+# TF_BUILD_IS_PIP: (NO_PIP | PIP | BOTH)
#
# Note: certain combinations of parameter values are regarded
# as invalid and will cause the script to exit with code 0. For example:
@@ -49,6 +49,11 @@
# (i.e., bazel test --job=1), potentially useful for
# builds where the tests cannot be run in parallel due to
# resource contention (e.g., for GPU builds)
+# TF_BUILD_TEST_TUTORIALS:
+# If set to any non-empty and non-0 value, will perform
+# tutorial tests (applicable only if TF_BUILD_IS_PIP is
+# PIP or BOTH).
+# See builds/test_tutorials.sh
#
# This script can be used by Jenkins parameterized / matrix builds.
@@ -62,6 +67,12 @@ str_strip () {
echo -e "$1" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'
}
+# Helper function: Exit on failure
+die () {
+ echo $@
+ exit 1
+}
+
##########################################################
# Default configuration
@@ -84,11 +95,12 @@ BAZEL_CLEAN_CMD="bazel clean"
BAZEL_SERIAL_FLAG="--jobs=1"
PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh"
+PIP_TEST_TUTORIALS_FLAG="--test_tutorials"
ANDROID_CMD="${CI_BUILD_DIR}/builds/android.sh"
BAZEL_TARGET="//tensorflow/..."
-
+TUT_TEST_DATA_DIR="/tmp/tf_tutorial_test_data"
##########################################################
@@ -116,6 +128,7 @@ echo " TF_BUILD_APPEND_ARGUMENTS=${TF_BUILD_APPEND_ARGUMENTS}"
echo " TF_BUILD_BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}"
echo " TF_BUILD_BAZEL_CLEAN=${TF_BUILD_BAZEL_CLEAN}"
echo " TF_BUILD_SERIAL_TESTS=${TF_BUILD_SERIAL_TESTS}"
+echo " TF_BUILD_TEST_TUTORIALS=${TF_BUILD_TEST_TUTORIALS}"
# Process container type
CTYPE=${TF_BUILD_CONTAINER_TYPE}
@@ -127,9 +140,8 @@ elif [[ ${CTYPE} == "gpu" ]]; then
elif [[ ${CTYPE} == "android" ]]; then
:
else
- echo "Unrecognized value in TF_BUILD_CONTAINER_TYPE: "\
+ die "Unrecognized value in TF_BUILD_CONTAINER_TYPE: "\
"\"${TF_BUILD_CONTAINER_TYPE}\""
- exit 1
fi
EXTRA_PARAMS=""
@@ -159,15 +171,15 @@ if [[ ${TF_BUILD_IS_OPT} == "no_opt" ]]; then
elif [[ ${TF_BUILD_IS_OPT} == "opt" ]]; then
OPT_FLAG="${OPT_FLAG} -c opt"
else
- echo "Unrecognized value in TF_BUILD_IS_OPT: \"${TF_BUILD_IS_OPT}\""
- exit 1
+ die "Unrecognized value in TF_BUILD_IS_OPT: \"${TF_BUILD_IS_OPT}\""
fi
# Strip whitespaces from OPT_FLAG
OPT_FLAG=$(str_strip "${OPT_FLAG}")
# Process PIP install-test option
-if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]]; then
+if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
+ [[ ${TF_BUILD_IS_PIP} == "both" ]]; then
# Process optional bazel target override
if [[ ! -z "${TF_BUILD_BAZEL_TARGET}" ]]; then
BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}
@@ -175,9 +187,9 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]]; then
if [[ ${CTYPE} == "cpu" ]] || [[ ${CTYPE} == "gpu" ]]; then
# Run Bazel
- MAIN_CMD="${MAIN_CMD} ${BAZEL_CMD} ${OPT_FLAG} "\
+ NO_PIP_MAIN_CMD="${MAIN_CMD} ${BAZEL_CMD} ${OPT_FLAG} "\
"${TF_BUILD_APPEND_ARGUMENTS} ${BAZEL_TARGET}"
- MAIN_CMD=$(str_strip "${MAIN_CMD}")
+ NO_PIP_MAIN_CMD=$(str_strip "${NO_PIP_MAIN_CMD}")
if [[ ! -z "${TF_BUILD_SERIAL_TESTS}" ]] &&
[[ "${TF_BUILD_SERIAL_TESTS}" != "0" ]]; then
@@ -189,15 +201,19 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]]; then
"${TF_BUILD_APPEND_ARGUMENTS} ${BAZEL_TARGET}"
echo "Build-only command: ${BUILD_ONLY_CMD}"
- MAIN_CMD="${BUILD_ONLY_CMD} && "\
+ NO_PIP_MAIN_CMD="${BUILD_ONLY_CMD} && "\
"${BAZEL_CMD} ${OPT_FLAG} ${BAZEL_SERIAL_FLAG} "\
"${TF_BUILD_APPEND_ARGUMENTS} ${BAZEL_TARGET}"
- echo "Parallel-build + serial-test command: ${MAIN_CMD}"
+ echo "Parallel-build + serial-test command: ${NO_PIP_MAIN_CMD}"
fi
elif [[ ${CTYPE} == "android" ]]; then
- MAIN_CMD="${ANDROID_CMD} ${OPT_FLAG} "
+ NO_PIP_MAIN_CMD="${ANDROID_CMD} ${OPT_FLAG} "
fi
-elif [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
+
+fi
+
+if [[ ${TF_BUILD_IS_PIP} == "pip" ]] ||
+ [[ ${TF_BUILD_IS_PIP} == "both" ]]; then
# Android builds conflict with PIP builds
if [[ ${CTYPE} == "android" ]]; then
echo "Skipping parameter combination: ${TF_BUILD_IS_PIP} & "\
@@ -205,13 +221,36 @@ elif [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
exit 0
fi
- MAIN_CMD="${MAIN_CMD} ${PIP_CMD} ${CTYPE} "\
+ PIP_MAIN_CMD="${MAIN_CMD} ${PIP_CMD} ${CTYPE} "\
"${TF_BUILD_APPEND_ARGUMENTS}"
+
+ # Add command for tutorial test
+ if [[ ! -z "${TF_BUILD_TEST_TUTORIALS}" ]] &&
+ [[ "${TF_BUILD_TEST_TUTORIALS}" != "0" ]]; then
+ PIP_MAIN_CMD="${PIP_MAIN_CMD} ${PIP_TEST_TUTORIALS_FLAG}"
+
+ # Prepare data directory for tutorial tests
+ mkdir -p "${TUT_TEST_DATA_DIR}" ||
+ die "FAILED to create data directory for tutorial tests: "\
+ "${TUT_TEST_DATA_DIR}"
+
+ if [[ "${DO_DOCKER}" == "1" ]]; then
+ EXTRA_PARAMS="${EXTRA_PARAMS} -v ${TUT_TEST_DATA_DIR}:${TUT_TEST_DATA_DIR}"
+ fi
+ fi
+fi
+
+if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]]; then
+ MAIN_CMD="${NO_PIP_MAIN_CMD}"
+elif [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
+ MAIN_CMD="${PIP_MAIN_CMD}"
+elif [[ ${TF_BUILD_IS_PIP} == "both" ]]; then
+ MAIN_CMD="${NO_PIP_MAIN_CMD} && ${PIP_MAIN_CMD}"
else
- echo "Unrecognized value in TF_BUILD_IS_PIP: \"${TF_BUILD_IS_PIP}\""
- exit 1
+ die "Unrecognized value in TF_BUILD_IS_PIP: \"${TF_BUILD_IS_PIP}\""
fi
+
# Process Python version
if [[ ${TF_BUILD_PYTHON_VERSION} == "python2" ]]; then
:
@@ -223,8 +262,7 @@ elif [[ ${TF_BUILD_PYTHON_VERSION} == "python3" ]]; then
# Determine the path to python3
PYTHON3_PATH=$(which python3 | head -1)
if [[ -z "${PYTHON3_PATH}" ]]; then
- echo "ERROR: Failed to locate python3 binary on the system"
- exit 1
+ die "ERROR: Failed to locate python3 binary on the system"
else
echo "Found python3 binary at: ${PYTHON3_PATH}"
fi
@@ -233,9 +271,8 @@ elif [[ ${TF_BUILD_PYTHON_VERSION} == "python3" ]]; then
fi
else
- echo "Unrecognized value in TF_BUILD_PYTHON_VERSION: "\
+ die "Unrecognized value in TF_BUILD_PYTHON_VERSION: "\
"\"${TF_BUILD_PYTHON_VERSION}\""
- exit 1
fi
# Append additional Docker extra parameters
@@ -253,6 +290,15 @@ TMP_SCRIPT=/tmp/ci_parameterized_build_${RAND_STR}.sh
if [[ "${DO_DOCKER}" == "1" ]]; then
# Map the tmp script into the Docker container
EXTRA_PARAMS="${EXTRA_PARAMS} -v ${TMP_SCRIPT}:/tmp/tf_build.sh"
+
+ if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
+ [[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]] &&
+ [[ "${TF_BUILD_IS_PIP}" != "both" ]]; then
+ # For TF_BUILD_IS_PIP == both, "bazel clean" will have already
+ # been performed before the "bazel test" step
+ EXTRA_PARAMS="${EXTRA_PARAMS} -e TF_BUILD_BAZEL_CLEAN=1"
+ fi
+
EXTRA_PARAMS=$(str_strip "${EXTRA_PARAMS}")
echo "Exporting CI_DOCKER_EXTRA_PARAMS: ${EXTRA_PARAMS}"
@@ -275,6 +321,7 @@ echo ""
chmod +x ${TMP_SCRIPT}
+FAILURE=0
if [[ ! -z "${TF_BUILD_DRY_RUN}" ]] && [[ ${TF_BUILD_DRY_RUN} != "0" ]]; then
# Do a dry run: just print the final command
echo "*** This is a DRY RUN ***"
@@ -285,7 +332,12 @@ else
else
${TMP_SCRIPT}
fi
-fi && FAILURE=0 || FAILURE=1
+
+ if [[ $? != "0" ]]; then
+ FAILURE=1
+ fi
+fi
+
[[ ${FAILURE} == "0" ]] && RESULT="SUCCESS" || RESULT="FAILURE"
rm -f ${TMP_SCRIPT}
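In Jenkins these knobs come from the build matrix, but the same combination can be reproduced locally; a sketch with illustrative values that exercises both the bazel tests and the PIP/tutorial path is:

    $ TF_BUILD_CONTAINER_TYPE=CPU TF_BUILD_PYTHON_VERSION=PYTHON2 \
      TF_BUILD_IS_OPT=OPT TF_BUILD_IS_PIP=BOTH TF_BUILD_TEST_TUTORIALS=1 \
      tensorflow/tools/ci_build/ci_parameterized_build.sh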
diff --git a/tensorflow/tools/ci_build/install/install_bazel.sh b/tensorflow/tools/ci_build/install/install_bazel.sh
index 8c3aa2b639..e6ac91e722 100755
--- a/tensorflow/tools/ci_build/install/install_bazel.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel.sh
@@ -17,7 +17,7 @@
set -e
# Select bazel version.
-BAZEL_VERSION="0.1.4"
+BAZEL_VERSION="0.2.0"
# Install bazel.
mkdir /bazel
diff --git a/tensorflow/tools/ci_build/install/install_openjdk8_from_ppa.sh b/tensorflow/tools/ci_build/install/install_bootstrap_deb_packages.sh
index 7f2e8be8c8..3b574692a0 100755
--- a/tensorflow/tools/ci_build/install/install_openjdk8_from_ppa.sh
+++ b/tensorflow/tools/ci_build/install/install_bootstrap_deb_packages.sh
@@ -16,9 +16,9 @@
set -e
-# Install openjdk 8 for bazel from PPA (it is not available in 14.04).
-add-apt-repository -y ppa:openjdk-r/ppa
+# Install bootstrap dependencies from ubuntu deb repository.
apt-get update
-apt-get install -y openjdk-8-jdk openjdk-8-jre-headless
+apt-get install -y \
+ software-properties-common
apt-get clean
rm -rf /var/lib/apt/lists/*
diff --git a/tensorflow/tools/ci_build/install/install_deb_packages.sh b/tensorflow/tools/ci_build/install/install_deb_packages.sh
index 9fe28b7894..b752e86d69 100755
--- a/tensorflow/tools/ci_build/install/install_deb_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_deb_packages.sh
@@ -23,14 +23,17 @@ apt-get install -y \
build-essential \
curl \
git \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
pkg-config \
python-dev \
python-numpy \
python-pip \
+ python-virtualenv \
python3-dev \
python3-numpy \
python3-pip \
- software-properties-common \
+ sudo \
swig \
unzip \
wget \
diff --git a/tensorflow/tools/ci_build/update_version.sh b/tensorflow/tools/ci_build/update_version.sh
new file mode 100755
index 0000000000..36a1e39f3a
--- /dev/null
+++ b/tensorflow/tools/ci_build/update_version.sh
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Automatically update TensorFlow version in source files
+#
+# Usage: update_version.sh <new_major_ver>.<new_minor_ver>.<new_patch_ver>
+# e.g.,
+# update_version.sh 0.7.2
+#
+
+# Helper functions
+die() {
+ echo $1
+ exit 1
+}
+
+check_existence() {
+ # Usage: check_existence (dir|file) <path>
+
+ if [[ "$1" == "dir" ]]; then
+ test -d "$2" ||
+ die "ERROR: Cannot find directory ${2}. "\
+"Are you under the TensorFlow source root directory?"
+ else
+ test -f "$2" ||
+ die "ERROR: Cannot find file ${2}. "\
+"Are you under the TensorFlow source root directory?"
+ fi
+}
+
+
+TF_SRC_DIR="tensorflow"
+check_existence dir "${TF_SRC_DIR}"
+
+# Process command-line arguments
+if [[ $# != 1 ]]; then
+ die "Usage: $(basename $0) <new_major_ver>.<new_minor_ver>.<new_patch_ver>"
+fi
+NEW_VER=$1
+
+# Check validity of new version string
+echo "${NEW_VER}" | grep -q -E "[0-9]+\.[0-9]+\.[0-9]+"
+if [[ $? != "0" ]]; then
+ die "ERROR: Invalid new version: \"${NEW_VER}\""
+fi
+
+# Extract major, minor and patch versions
+MAJOR=$(echo "${NEW_VER}" | cut -d \. -f 1)
+MINOR=$(echo "${NEW_VER}" | cut -d \. -f 2)
+PATCH=$(echo "${NEW_VER}" | cut -d \. -f 3)
+
+# Update tensorflow/core/public/version.h
+VERSION_H="${TF_SRC_DIR}/core/public/version.h"
+check_existence file "${VERSION_H}"
+
+OLD_MAJOR=$(cat ${VERSION_H} | grep -E "^#define TF_MAJOR_VERSION [0-9]+" | \
+cut -d ' ' -f 3)
+OLD_MINOR=$(cat ${VERSION_H} | grep -E "^#define TF_MINOR_VERSION [0-9]+" | \
+cut -d ' ' -f 3)
+OLD_PATCH=$(cat ${VERSION_H} | grep -E "^#define TF_PATCH_VERSION [0-9]+" | \
+cut -d ' ' -f 3)
+
+sed -i -e "s/^#define TF_MAJOR_VERSION ${OLD_MAJOR}/#define TF_MAJOR_VERSION ${MAJOR}/g" ${VERSION_H}
+sed -i -e "s/^#define TF_MINOR_VERSION ${OLD_MINOR}/#define TF_MINOR_VERSION ${MINOR}/g" ${VERSION_H}
+sed -i -e "s/^#define TF_PATCH_VERSION ${OLD_PATCH}/#define TF_PATCH_VERSION ${PATCH}/g" "${VERSION_H}"
+
+
+# Update setup.py
+SETUP_PY="${TF_SRC_DIR}/tools/pip_package/setup.py"
+check_existence file "${SETUP_PY}"
+
+sed -i -e "s/^\_VERSION = [\'\"].*[\'\"]/\_VERSION = \'${MAJOR}.${MINOR}.${PATCH}\'/g" "${SETUP_PY}"
+
+
+# Update Dockerfiles in tensorflow/tools/docker/
+TOOLS_DOCKER_DIR="${TF_SRC_DIR}/tools/docker"
+check_existence dir "${TOOLS_DOCKER_DIR}"
+
+# Determine the files that need to be modified
+DOCKERFILES=$(grep -lrE "^ENV TENSORFLOW_VERSION .+" ${TOOLS_DOCKER_DIR})
+for DOCKERF in ${DOCKERFILES}; do
+ sed -i -r -e "s/^ENV TENSORFLOW_VERSION .+/ENV TENSORFLOW_VERSION ${MAJOR}.${MINOR}.${PATCH}/g" "${DOCKERF}"
+done
+
+
+# Update os_setup.md
+OS_SETUP="${TF_SRC_DIR}/g3doc/get_started/os_setup.md"
+check_existence file "${OS_SETUP}"
+
+sed -i -r -e "s/(.*pip[0-9]* install .*tensorflow-)([0-9]+\.[0-9]+\.[0-9]+)(.*\.whl)/\1${MAJOR}.${MINOR}.${PATCH}\3/g" "${OS_SETUP}"
+
+sed -i -r -e "s/(.*\(e\.g\..*[^0-9])([0-9]+\.[0-9]+\.[0-9]+)([^0-9].*\).*)/\1${MAJOR}.${MINOR}.${PATCH}\3/g" "${OS_SETUP}"
+
+
+# Update README.md
+README_MD="./README.md"
+check_existence file "${README_MD}"
+
+sed -i -r -e "s/${OLD_MAJOR}\.${OLD_MINOR}\.${OLD_PATCH}/${MAJOR}.${MINOR}.${PATCH}/g" "${README_MD}"
+
+
+echo "Major: ${OLD_MAJOR} -> ${MAJOR}"
+echo "Minor: ${OLD_MINOR} -> ${MINOR}"
+echo "Patch: ${OLD_PATCH} -> ${PATCH}"
+echo ""
+
+# Look for potentially lingering old version strings in TensorFlow source files
+OLD_VER="${OLD_MAJOR}\.${OLD_MINOR}\.${OLD_PATCH}"
+LINGER_STRS=$(grep -rnoH "${OLD_VER}" "${TF_SRC_DIR}")
+
+if [[ ! -z "${LINGER_STRS}" ]]; then
+ echo "WARNING: Below are potentially instances of lingering old version "\
+"string (${OLD_VER}) in source directory \"${TF_SRC_DIR}/\" that are not "\
+"updated by this script. Please check them manually!"
+ for LINGER_STR in ${LINGER_STRS}; do
+ echo "${LINGER_STR}"
+ done
+else
+ echo "No lingering old version strings found in source directory "\
+"\"${TF_SRC_DIR}/\". Good."
+fi
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index 69e502d098..a8156559ed 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -4,6 +4,7 @@ MAINTAINER Craig Citro <craigcitro@google.com>
# Pick up some TF dependencies
RUN apt-get update && apt-get install -y \
+ bc \
curl \
libfreetype6-dev \
libpng12-dev \
@@ -28,13 +29,16 @@ RUN pip --no-cache-dir install \
python -m ipykernel.kernelspec
# Install TensorFlow CPU version.
-ENV TENSORFLOW_VERSION 0.7.0
+ENV TENSORFLOW_VERSION 0.7.1
RUN pip --no-cache-dir install \
- http://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TENSORFLOW_VERSION}-py2-none-linux_x86_64.whl
+ http://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/
+# Copy sample notebooks.
+COPY notebooks /notebooks
+
# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
@@ -45,6 +49,6 @@ EXPOSE 6006
# IPython
EXPOSE 8888
-WORKDIR "/root"
+WORKDIR "/notebooks"
-CMD ["/bin/bash"]
+CMD ["/run_jupyter.sh"]
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index 1a30c7d700..ac8e885fd7 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -64,7 +64,7 @@ RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/root/.bazelrc
ENV BAZELRC /root/.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.1.4
+ENV BAZEL_VERSION 0.2.0
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 56de5940ab..3c85d16a9d 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.0-cudnn2-devel
+FROM nvidia/cuda:7.5-cudnn4-devel
MAINTAINER Craig Citro <craigcitro@google.com>
@@ -64,7 +64,7 @@ RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/root/.bazelrc
ENV BAZELRC /root/.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.1.4
+ENV BAZEL_VERSION 0.2.0
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@@ -96,7 +96,6 @@ WORKDIR /root
# Set up CUDA variables
ENV CUDA_PATH /usr/local/cuda
-ENV LD_LIBRARY_PATH /usr/local/cuda/lib64
# TensorBoard
EXPOSE 6006
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 77699ebb42..8d2ede62b5 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -1,9 +1,10 @@
-FROM nvidia/cuda:7.0-cudnn2-runtime
+FROM nvidia/cuda:7.5-cudnn4-runtime
MAINTAINER Craig Citro <craigcitro@google.com>
# Pick up some TF dependencies
RUN apt-get update && apt-get install -y \
+ bc \
curl \
libfreetype6-dev \
libpng12-dev \
@@ -28,13 +29,16 @@ RUN pip --no-cache-dir install \
python -m ipykernel.kernelspec
# Install TensorFlow GPU version.
-ENV TENSORFLOW_VERSION 0.7.0
+ENV TENSORFLOW_VERSION 0.7.1
RUN pip --no-cache-dir install \
- http://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-${TENSORFLOW_VERSION}-py2-none-linux_x86_64.whl
+ http://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/
+# Copy sample notebooks.
+COPY notebooks /notebooks
+
# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
@@ -45,6 +49,6 @@ EXPOSE 6006
# IPython
EXPOSE 8888
-WORKDIR "/root"
+WORKDIR "/notebooks"
-CMD ["/bin/bash"]
+CMD ["/run_jupyter.sh"]
diff --git a/tensorflow/tools/docker/README.md b/tensorflow/tools/docker/README.md
index e94b11e4f2..f7ec66d933 100644
--- a/tensorflow/tools/docker/README.md
+++ b/tensorflow/tools/docker/README.md
@@ -38,7 +38,7 @@ NVidia libraries available on their system, as well as providing mappings so
that the container can see the host's GPU. For most purposes, this can be
accomplished via
- $ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda* | xargs -I{} echo '-v {}:{}')
+ $ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
$ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
$ docker run -it -p 8888:8888 $CUDA_SO $DEVICES b.gcr.io/tensorflow/tensorflow-devel-gpu
diff --git a/tensorflow/tools/docker/docker_run_gpu.sh b/tensorflow/tools/docker/docker_run_gpu.sh
index 699b39dae1..9ebfa701e4 100755
--- a/tensorflow/tools/docker/docker_run_gpu.sh
+++ b/tensorflow/tools/docker/docker_run_gpu.sh
@@ -24,7 +24,7 @@ if [ ! -d ${CUDA_HOME}/lib64 ]; then
exit 1
fi
-export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda* | \
+export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | \
xargs -I{} echo '-v {}:{}')
export DEVICES=$(\ls /dev/nvidia* | \
xargs -I{} echo '--device {}:{}')
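
Both the README and docker_run_gpu.sh narrow the glob from libcuda* to libcuda.*, which keeps libcudart* from also being matched and mounted into the container. A small sketch of the difference, using made-up file names:

    # Sketch of what each glob pattern matches; the file names are illustrative.
    import fnmatch

    files = ["libcuda.so", "libcuda.so.1", "libcuda.so.352.63",
             "libcudart.so", "libcudart.so.7.5"]

    print([f for f in files if fnmatch.fnmatch(f, "libcuda*")])
    # matches everything above, including the libcudart* runtime libraries
    print([f for f in files if fnmatch.fnmatch(f, "libcuda.*")])
    # matches only libcuda.so, libcuda.so.1, libcuda.so.352.63 (the driver library)
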
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 7474757a6c..11b57134f6 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -19,6 +19,7 @@ from __future__ import print_function
import fnmatch
import os
+import platform
import re
import sys
@@ -26,10 +27,17 @@ from setuptools import find_packages, setup, Command, Extension
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.7.0'
+_VERSION = '0.7.1'
+
+numpy_version = "1.8.2"
+if platform.system() == "Darwin":
+  # numpy pip installation on OS X has bugs for numpy versions prior to
+  # 1.10.1, so on Mac we require a higher numpy version than on other
+  # platforms.
+ numpy_version = "1.10.1"
REQUIRED_PACKAGES = [
- 'numpy >= 1.8.2',
+ 'numpy >= %s' % numpy_version,
'six >= 1.10.0',
'protobuf == 3.0.0b2',
]
@@ -43,7 +51,7 @@ else:
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
- 'tensorboard = tensorflow.tensorboard.backend.tensorboard:main',
+ 'tensorboard = tensorflow.tensorboard.tensorboard:main',
]
# pylint: enable=line-too-long
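
With this change the numpy pin is chosen per platform before REQUIRED_PACKAGES is assembled. A standalone sketch of the resulting requirement lists (the helper function is illustrative; the version pins mirror the change above):

    # Illustrative helper showing how the platform-dependent numpy pin resolves.
    import platform

    def required_packages(system=None):
        system = system or platform.system()
        numpy_version = "1.10.1" if system == "Darwin" else "1.8.2"
        return [
            "numpy >= %s" % numpy_version,
            "six >= 1.10.0",
            "protobuf == 3.0.0b2",
        ]

    print(required_packages("Darwin"))  # ['numpy >= 1.10.1', ...]
    print(required_packages("Linux"))   # ['numpy >= 1.8.2', ...]
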
diff --git a/tensorflow/tools/test/BUILD b/tensorflow/tools/test/BUILD
new file mode 100644
index 0000000000..4a8bb87a77
--- /dev/null
+++ b/tensorflow/tools/test/BUILD
@@ -0,0 +1,40 @@
+# Description:
+# Tools for testing
+
+package(default_visibility = ["//tensorflow:__subpackages__"])
+
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+py_library(
+ name = "system_info_lib",
+ srcs = [
+ "gpu_info_lib.py",
+ "system_info_lib.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = ["//tensorflow:tensorflow_py"],
+)
+
+py_binary(
+ name = "system_info",
+ srcs = ["system_info.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":system_info_lib",
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
+filegroup(
+ name = "all_files",
+ srcs = glob(
+ ["**/*"],
+ exclude = [
+ "**/METADATA",
+ "**/OWNERS",
+ ],
+ ),
+ visibility = ["//tensorflow:__subpackages__"],
+)
diff --git a/tensorflow/tools/test/__init__.py b/tensorflow/tools/test/__init__.py
new file mode 100644
index 0000000000..0468856532
--- /dev/null
+++ b/tensorflow/tools/test/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Tools for testing."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
diff --git a/tensorflow/tools/test/gpu_info_lib.py b/tensorflow/tools/test/gpu_info_lib.py
new file mode 100644
index 0000000000..cfb7d89920
--- /dev/null
+++ b/tensorflow/tools/test/gpu_info_lib.py
@@ -0,0 +1,184 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Library for getting system information during TensorFlow tests."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import ctypes as ct
+import platform
+
+import tensorflow as tf
+
+from tensorflow.core.util import test_log_pb2
+
+
+def _gather_gpu_devices_proc():
+ """Try to gather NVidia GPU device information via /proc/driver."""
+ dev_info = []
+ for f in tf.gfile.Glob("/proc/driver/nvidia/gpus/*/information"):
+ bus_id = f.split("/")[5]
+ key_values = dict(
+ line.rstrip().replace("\t", "").split(":", 1)
+ for line in tf.gfile.GFile(f, "r"))
+ key_values = dict(
+ (k.lower(), v.strip(" ").rstrip(" "))
+ for (k, v) in key_values.items())
+ info = test_log_pb2.GPUInfo()
+ info.model = key_values.get("model", "Unknown")
+ info.uuid = key_values.get("gpu uuid", "Unknown")
+ info.bus_id = bus_id
+ dev_info.append(info)
+ return dev_info
+
+
+class CUDADeviceProperties(ct.Structure):
+ # See $CUDA_HOME/include/cuda_runtime_api.h for the definition of
+ # the cudaDeviceProp struct.
+ _fields_ = [
+ ("name", ct.c_char * 256),
+ ("totalGlobalMem", ct.c_size_t),
+ ("sharedMemPerBlock", ct.c_size_t),
+ ("regsPerBlock", ct.c_int),
+ ("warpSize", ct.c_int),
+ ("memPitch", ct.c_size_t),
+ ("maxThreadsPerBlock", ct.c_int),
+ ("maxThreadsDim", ct.c_int * 3),
+ ("maxGridSize", ct.c_int * 3),
+ ("clockRate", ct.c_int),
+ ("totalConstMem", ct.c_size_t),
+ ("major", ct.c_int),
+ ("minor", ct.c_int),
+ ("textureAlignment", ct.c_size_t),
+ ("texturePitchAlignment", ct.c_size_t),
+ ("deviceOverlap", ct.c_int),
+ ("multiProcessorCount", ct.c_int),
+ ("kernelExecTimeoutEnabled", ct.c_int),
+ ("integrated", ct.c_int),
+ ("canMapHostMemory", ct.c_int),
+ ("computeMode", ct.c_int),
+ ("maxTexture1D", ct.c_int),
+ ("maxTexture1DMipmap", ct.c_int),
+ ("maxTexture1DLinear", ct.c_int),
+ ("maxTexture2D", ct.c_int * 2),
+ ("maxTexture2DMipmap", ct.c_int * 2),
+ ("maxTexture2DLinear", ct.c_int * 3),
+ ("maxTexture2DGather", ct.c_int * 2),
+ ("maxTexture3D", ct.c_int * 3),
+ ("maxTexture3DAlt", ct.c_int * 3),
+ ("maxTextureCubemap", ct.c_int),
+ ("maxTexture1DLayered", ct.c_int * 2),
+ ("maxTexture2DLayered", ct.c_int * 3),
+ ("maxTextureCubemapLayered", ct.c_int * 2),
+ ("maxSurface1D", ct.c_int),
+ ("maxSurface2D", ct.c_int * 2),
+ ("maxSurface3D", ct.c_int * 3),
+ ("maxSurface1DLayered", ct.c_int * 2),
+ ("maxSurface2DLayered", ct.c_int * 3),
+ ("maxSurfaceCubemap", ct.c_int),
+ ("maxSurfaceCubemapLayered", ct.c_int * 2),
+ ("surfaceAlignment", ct.c_size_t),
+ ("concurrentKernels", ct.c_int),
+ ("ECCEnabled", ct.c_int),
+ ("pciBusID", ct.c_int),
+ ("pciDeviceID", ct.c_int),
+ ("pciDomainID", ct.c_int),
+ ("tccDriver", ct.c_int),
+ ("asyncEngineCount", ct.c_int),
+ ("unifiedAddressing", ct.c_int),
+ ("memoryClockRate", ct.c_int),
+ ("memoryBusWidth", ct.c_int),
+ ("l2CacheSize", ct.c_int),
+ ("maxThreadsPerMultiProcessor", ct.c_int),
+ ("streamPrioritiesSupported", ct.c_int),
+ ("globalL1CacheSupported", ct.c_int),
+ ("localL1CacheSupported", ct.c_int),
+ ("sharedMemPerMultiprocessor", ct.c_size_t),
+ ("regsPerMultiprocessor", ct.c_int),
+ ("managedMemSupported", ct.c_int),
+ ("isMultiGpuBoard", ct.c_int),
+ ("multiGpuBoardGroupID", ct.c_int),
+ # Pad with extra space to avoid dereference crashes if future
+ # versions of CUDA extend the size of this struct.
+ ("__future_buffer", ct.c_char * 4096)]
+
+
+def _gather_gpu_devices_cudart():
+ """Try to gather NVidia GPU device information via libcudart."""
+ dev_info = []
+
+ system = platform.system()
+ if system == "Linux":
+ libcudart = ct.cdll.LoadLibrary("libcudart.so")
+ elif system == "Darwin":
+ libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
+ elif system == "Windows":
+ libcudart = ct.windll.LoadLibrary("libcudart.dll")
+ else:
+ raise NotImplementedError("Cannot identify system.")
+
+ version = ct.c_int()
+ rc = libcudart.cudaRuntimeGetVersion(ct.byref(version))
+ if rc != 0:
+ raise ValueError("Could not get version")
+ if version.value < 6050:
+ raise NotImplementedError("CUDA version must be between >= 6.5")
+
+ device_count = ct.c_int()
+ libcudart.cudaGetDeviceCount(ct.byref(device_count))
+
+ for i in range(device_count.value):
+ properties = CUDADeviceProperties()
+ rc = libcudart.cudaGetDeviceProperties(ct.byref(properties), i)
+ if rc != 0:
+ raise ValueError("Could not get device properties")
+ pci_bus_id = " " * 13
+ rc = libcudart.cudaDeviceGetPCIBusId(ct.c_char_p(pci_bus_id), 13, i)
+ if rc != 0:
+ raise ValueError("Could not get device PCI bus id")
+
+ info = test_log_pb2.GPUInfo() # No UUID available
+ info.model = properties.name
+ info.bus_id = pci_bus_id
+ dev_info.append(info)
+
+ del properties
+
+ return dev_info
+
+
+def gather_gpu_devices():
+ """Gather gpu device info.
+
+ Returns:
+ A list of test_log_pb2.GPUInfo messages.
+ """
+ try:
+    # Prefer using /proc if possible, since it provides the UUID.
+ dev_info = _gather_gpu_devices_proc()
+ if not dev_info:
+ raise ValueError("No devices found")
+ return dev_info
+ except (IOError, ValueError):
+ pass
+
+ try:
+ # Fall back on using libcudart
+ return _gather_gpu_devices_cudart()
+ except (OSError, ValueError, NotImplementedError):
+ return []
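
The /proc path above reads each gpus/<bus id>/information file, which holds colon-separated, tab-padded fields, and normalizes them into a lower-cased dict. The same parsing applied to a made-up payload (the model name and UUID are placeholders):

    # Sketch of the key/value parsing done in _gather_gpu_devices_proc.
    sample = (
        "Model:\t\t GeForce GTX TITAN\n"
        "IRQ:\t\t 53\n"
        "GPU UUID:\t GPU-00000000-0000-0000-0000-000000000000\n"
    )

    key_values = dict(
        line.rstrip().replace("\t", "").split(":", 1)
        for line in sample.splitlines())
    key_values = dict(
        (k.lower(), v.strip()) for (k, v) in key_values.items())

    print(key_values["model"])     # 'GeForce GTX TITAN'
    print(key_values["gpu uuid"])  # 'GPU-00000000-0000-0000-0000-000000000000'
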
diff --git a/tensorflow/tools/test/system_info.py b/tensorflow/tools/test/system_info.py
new file mode 100644
index 0000000000..dcbbe1ce1a
--- /dev/null
+++ b/tensorflow/tools/test/system_info.py
@@ -0,0 +1,33 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Library for getting system information during TensorFlow tests."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+from tensorflow.tools.test import system_info_lib
+
+
+def main(unused_args):
+ config = system_info_lib.gather_machine_configuration()
+ print(config)
+
+
+if __name__ == "__main__":
+ tf.app.run()
diff --git a/tensorflow/tools/test/system_info_lib.py b/tensorflow/tools/test/system_info_lib.py
new file mode 100644
index 0000000000..c36a6c6b13
--- /dev/null
+++ b/tensorflow/tools/test/system_info_lib.py
@@ -0,0 +1,149 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Library for getting system information during TensorFlow tests."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import multiprocessing
+import platform
+import re
+import socket
+
+import tensorflow as tf
+
+# pylint: disable=g-bad-import-order
+# Note: cpuinfo and psutil are not installed for you in the TensorFlow
+# OSS tree. They are installable via pip.
+import cpuinfo
+import psutil
+# pylint: enable=g-bad-import-order
+
+from tensorflow.core.util import test_log_pb2
+from tensorflow.python.client import device_lib
+from tensorflow.tools.test import gpu_info_lib
+
+
+def gather_machine_configuration():
+ """Gather Machine Configuration. This is the top level fn of this library."""
+ config = test_log_pb2.MachineConfiguration()
+
+ config.cpu_info.CopyFrom(gather_cpu_info())
+ config.platform_info.CopyFrom(gather_platform_info())
+
+ # gather_available_device_info must come before gather_gpu_devices
+ # because the latter may access libcudart directly, which confuses
+ # TensorFlow StreamExecutor.
+ for d in gather_available_device_info():
+ config.available_device_info.add().CopyFrom(d)
+ for gpu in gpu_info_lib.gather_gpu_devices():
+ config.device_info.add().Pack(gpu)
+
+ config.memory_info.CopyFrom(gather_memory_info())
+
+ config.hostname = gather_hostname()
+
+ return config
+
+
+def gather_hostname():
+ return socket.gethostname()
+
+
+def gather_memory_info():
+ """Gather memory info."""
+ mem_info = test_log_pb2.MemoryInfo()
+ vmem = psutil.virtual_memory()
+ mem_info.total = vmem.total
+ mem_info.available = vmem.available
+ return mem_info
+
+
+def gather_cpu_info():
+ """Gather CPU Information. Assumes all CPUs are the same."""
+ cpu_info = test_log_pb2.CPUInfo()
+ cpu_info.num_cores = multiprocessing.cpu_count()
+
+ # Gather num_cores_allowed
+ try:
+ with tf.gfile.GFile('/proc/self/status') as fh:
+ nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', fh.read())
+ if nc: # e.g. 'ff' => 8, 'fff' => 12
+ cpu_info.num_cores_allowed = (
+ bin(int(nc.group(1).replace(',', ''), 16)).count('1'))
+ except IOError:
+ pass
+ finally:
+ if cpu_info.num_cores_allowed == 0:
+ cpu_info.num_cores_allowed = cpu_info.num_cores
+
+ # Gather the rest
+ info = cpuinfo.get_cpu_info()
+ cpu_info.cpu_info = info['brand']
+ cpu_info.num_cores = info['count']
+ cpu_info.mhz_per_cpu = info['hz_advertised_raw'][0] / 1.0e6
+ l2_cache_size = re.match(r'(\d+)', str(info['l2_cache_size']))
+ if l2_cache_size:
+ # If a value is returned, it's in KB
+ cpu_info.cache_size['L2'] = int(l2_cache_size.group(0)) * 1024
+
+ # Try to get the CPU governor
+ try:
+ cpu_governors = set([
+ tf.gfile.GFile(f, 'r').readline().rstrip()
+ for f in tf.gfile.Glob(
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')])
+ if cpu_governors:
+ if len(cpu_governors) > 1:
+ cpu_info.cpu_governor = 'mixed'
+ else:
+ cpu_info.cpu_governor = list(cpu_governors)[0]
+ except IOError:
+ pass
+
+ return cpu_info
+
+
+def gather_available_device_info():
+ """Gather list of devices available to TensorFlow.
+
+ Returns:
+ A list of test_log_pb2.AvailableDeviceInfo messages.
+ """
+ device_info_list = []
+ devices = device_lib.list_local_devices()
+
+ for d in devices:
+ device_info = test_log_pb2.AvailableDeviceInfo()
+ device_info.name = d.name
+ device_info.type = d.device_type
+ device_info.memory_limit = d.memory_limit
+ device_info.physical_description = d.physical_device_desc
+ device_info_list.append(device_info)
+
+ return device_info_list
+
+
+def gather_platform_info():
+ """Gather platform info."""
+ platform_info = test_log_pb2.PlatformInfo()
+ (platform_info.bits, platform_info.linkage) = platform.architecture()
+ platform_info.machine = platform.machine()
+ platform_info.release = platform.release()
+ platform_info.system = platform.system()
+ platform_info.version = platform.version()
+ return platform_info
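
gather_cpu_info above derives num_cores_allowed by reading the Cpus_allowed hex mask from /proc/self/status and counting its set bits. A standalone sketch of that conversion (the helper name and sample masks are illustrative):

    # Each set bit in the (possibly comma-separated) hex mask is one allowed CPU.
    import re

    def allowed_cpu_count(status_text):
        nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status_text)
        if not nc:
            return None
        mask = int(nc.group(1).replace(',', ''), 16)
        return bin(mask).count('1')

    print(allowed_cpu_count("Cpus_allowed:\tff\n"))         # 8
    print(allowed_cpu_count("Cpus_allowed:\tfff\n"))        # 12
    print(allowed_cpu_count("Cpus_allowed:\tff,ffffffff"))  # 40
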