aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--tensorflow/contrib/cmake/CMakeLists.txt58
-rw-r--r--tensorflow/contrib/cmake/README.md28
-rw-r--r--tensorflow/contrib/cmake/external/mkldnn.cmake44
-rw-r--r--tensorflow/core/common_runtime/mkl_cpu_allocator.h4
-rw-r--r--tensorflow/core/graph/mkl_tfconversion_pass.h4
-rw-r--r--tensorflow/core/kernels/mkl_relu_op.cc8
-rw-r--r--tensorflow/core/util/mkl_util.h4
7 files changed, 144 insertions, 6 deletions
diff --git a/tensorflow/contrib/cmake/CMakeLists.txt b/tensorflow/contrib/cmake/CMakeLists.txt
index a7944ea74a..95df69465a 100644
--- a/tensorflow/contrib/cmake/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/CMakeLists.txt
@@ -31,10 +31,14 @@ option(tensorflow_BUILD_PYTHON_TESTS "Build python unit tests " OFF)
option(tensorflow_BUILD_MORE_PYTHON_TESTS "Build more python unit tests for contrib packages" OFF)
option(tensorflow_BUILD_SHARED_LIB "Build TensorFlow as a shared library" OFF)
option(tensorflow_OPTIMIZE_FOR_NATIVE_ARCH "Enable compiler optimizations for the native processor architecture (if available)" ON)
-option(tensorflow_WIN_CPU_SIMD_OPTIONS "Enables CPU SIMD instructions")
option(tensorflow_ENABLE_SNAPPY_SUPPORT "Enable SNAPPY compression support" ON)
option(tensorflow_DISABLE_EIGEN_FORCEINLINE "Disable forceinline, to speed up build on windows." OFF)
+# SIMD, MKL and MKLDNN options
+option(tensorflow_WIN_CPU_SIMD_OPTIONS "Enables CPU SIMD instructions" OFF)
+option(tensorflow_ENABLE_MKL_SUPPORT "Enable Intel MKL support" OFF)
+option(tensorflow_ENABLE_MKLDNN_SUPPORT "Enable Intel MKLDNN support, requires MKL enabled" OFF)
+
# GPU, CUDA and cuDNN options
option(tensorflow_ENABLE_GPU "Enable GPU support" OFF)
set(tensorflow_CUDA_VERSION "9.0" CACHE STRING "CUDA version to build against")
@@ -162,12 +166,21 @@ endif()
# MSVC SIMD instructions
if (tensorflow_WIN_CPU_SIMD_OPTIONS)
+ include(CheckCXXCompilerFlag)
+ if (tensorflow_ENABLE_MKL_SUPPORT)
+ add_definitions(-DINTEL_MKL -DEIGEN_USE_VML)
+ if (NOT tensorflow_ENABLE_MKLDNN_SUPPORT)
+ add_definitions(-DINTEL_MKL_ML)
+ endif()
+ endif()
+ CHECK_CXX_COMPILER_FLAG("-fopenmp" COMPILER_OPT_OPENMP_SUPPORT)
+ if (COMPILER_OPT_OPENMP_SUPPORT)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
+ endif()
if (WIN32)
- CHECK_CXX_COMPILER_FLAG("${tensorflow_WIN_CPU_SIMD_OPTIONS}" COMPILER_OPT_WIN_CPU_SIMD_SUPPORTED)
+ CHECK_CXX_COMPILER_FLAG(${tensorflow_WIN_CPU_SIMD_OPTIONS} COMPILER_OPT_WIN_CPU_SIMD_SUPPORTED)
if(COMPILER_OPT_WIN_CPU_SIMD_SUPPORTED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${tensorflow_WIN_CPU_SIMD_OPTIONS}")
- else()
- message(FATAL_ERROR "${tensorflow_WIN_CPU_SIMD_OPTIONS} not supported")
endif()
endif()
endif()
@@ -298,6 +311,43 @@ if(HAIKU)
list(APPEND tensorflow_EXTERNAL_LIBRARIES network)
endif()
+if (tensorflow_ENABLE_MKL_SUPPORT)
+ if (WIN32)
+ find_path(MKL_HOME_PLATFORM mkl
+ PATHS ${MKL_HOME} ${MKL_HOME}/../ ${MKL_HOME}/../../
+ PATH_SUFFIXES windows)
+ set(MKL_INCLUDE_DIRS ${MKL_HOME_PLATFORM}/mkl/include)
+ set(MKL_LINK_DIRS
+ ${MKL_HOME_PLATFORM}/mkl/lib/intel64
+ ${MKL_HOME_PLATFORM}/tbb/lib/intel64/vc_mt
+ ${MKL_HOME_PLATFORM}/compiler/lib/intel64
+ ${MKL_HOME_PLATFORM}/mkl/tools/builder/lib)
+ set(MKL_REDIST_DLL_DIRS
+ ${MKL_HOME_PLATFORM}/redist/intel64/mkl
+ ${MKL_HOME_PLATFORM}/redist/intel64/tbb/vc_mt
+ ${MKL_HOME_PLATFORM}/redist/intel64/compiler)
+ list(APPEND tensorflow_EXTERNAL_LIBRARIES
+ mkl_intel_lp64_dll mkl_sequential_dll mkl_core_dll mkl_rt mkl_cdll_intel64)
+ endif()
+ if (UNIX)
+ # Fix me: complete the path on linux
+ find_path(MKL_HOME_PLATFORM mkl
+ HINTS ${MKL_HOME} ${MKL_HOME}/../ ${MKL_HOME}/../../
+ PATH_SUFFIXES linux)
+ set(MKL_INCLUDE_DIRS ${MKL_HOME_PLATFORM}/mkl/include)
+ set(MKL_LINK_DIRS) # incompleted
+ set(MKL_REDIST_SO_DIRS) # incompleted
+ endif()
+ include_directories(${MKL_INCLUDE_DIRS})
+ link_directories(${MKL_LINK_DIRS})
+ if (tensorflow_ENABLE_MKLDNN_SUPPORT)
+ include(mkldnn)
+ list(APPEND tensorflow_EXTERNAL_LIBRARIES ${mkldnn_STATIC_LIBRARIES})
+ list(APPEND tensorflow_EXTERNAL_DEPENDENCIES mkldnn)
+ include_directories(${mkldnn_INCLUDE_DIRS})
+ endif()
+endif (tensorflow_ENABLE_MKL_SUPPORT)
+
if (tensorflow_ENABLE_GPU)
if (NOT WIN32)
# Default install paths for cuda libraries in Linux
diff --git a/tensorflow/contrib/cmake/README.md b/tensorflow/contrib/cmake/README.md
index fe83bb3204..0b79f718d4 100644
--- a/tensorflow/contrib/cmake/README.md
+++ b/tensorflow/contrib/cmake/README.md
@@ -128,6 +128,18 @@ Step-by-step Windows build
D:\local\cuda\bin
```
+ * When building with MKL support after installing [MKL](https://software.intel.com/en-us/mkl) from INTEL, append its bin directories to your PATH environment variable.
+
+ In case TensorFlow fails to find the MKL DLLs during initialization, check your PATH environment variable.
+ It should contain the directory of the MKL DLLs. For example:
+
+ ```
+ D:\Tools\IntelSWTools\compilers_and_libraries\windows\redist\intel64\mkl
+ D:\Tools\IntelSWTools\compilers_and_libraries\windows\redist\intel64\compiler
+ D:\Tools\IntelSWTools\compilers_and_libraries\windows\redist\intel64\tbb\vc_mt
+ ```
+
+
* We assume that `cmake` and `git` are installed and in your `%PATH%`. If
for example `cmake` is not in your path and it is installed in
`C:\Program Files (x86)\CMake\bin\cmake.exe`, you can add this directory
@@ -166,7 +178,15 @@ Step-by-step Windows build
More? -Dtensorflow_ENABLE_GPU=ON ^
More? -DCUDNN_HOME="D:\...\cudnn"
```
+ To build with MKL support add "^" at the end of the last line above followed by:
+
+ ```
+ More? -Dtensorflow_ENABLE_MKL_SUPPORT=ON ^
+ More? -DMKL_HOME="D:\...\compilers_and_libraries"
+ ```
+
To enable SIMD instructions with MSVC, as AVX and SSE, define it as follows:
+
```
More? -Dtensorflow_WIN_CPU_SIMD_OPTIONS=/arch:AVX
```
@@ -226,6 +246,7 @@ Step-by-step Windows build
```
ctest -C RelWithDebInfo
```
+
* `-Dtensorflow_BUILD_MORE_PYTHON_TESTS=(ON|OFF)`. Defaults to `OFF`. This enables python tests on
several major packages. This option is only valid if this and tensorflow_BUILD_PYTHON_TESTS are both set as `ON`.
After building the python wheel, you need to install the new wheel before running the tests.
@@ -234,6 +255,12 @@ Step-by-step Windows build
ctest -C RelWithDebInfo
```
+ * `-Dtensorflow_ENABLE_MKL_SUPPORT=(ON|OFF)`. Defaults to `OFF`. Include MKL support. If MKL is enabled you need to install the [Intel Math Kernel Library](https://software.intel.com/en-us/mkl).
+ CMake will expect the location of MKL in -DMKL_HOME=path_you_install_mkl.
+
+ * `-Dtensorflow_ENABLE_MKLDNN_SUPPORT=(ON|OFF)`. Defaults to `OFF`. Include MKL DNN support. MKL DNN is [Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN)](https://github.com/intel/mkl-dnn). You have to add `-Dtensorflow_ENABLE_MKL_SUPPORT=ON` before including MKL DNN support.
+
+
4. Invoke MSBuild to build TensorFlow.
To build the C++ example program, which will be created as a `.exe`
@@ -251,6 +278,7 @@ Step-by-step Windows build
D:\...\build> MSBuild /p:Configuration=Release tf_python_build_pip_package.vcxproj
```
+
Linux Continuous Integration build
==================================
diff --git a/tensorflow/contrib/cmake/external/mkldnn.cmake b/tensorflow/contrib/cmake/external/mkldnn.cmake
new file mode 100644
index 0000000000..a639fdee36
--- /dev/null
+++ b/tensorflow/contrib/cmake/external/mkldnn.cmake
@@ -0,0 +1,44 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+include (ExternalProject)
+
+set(mkldnn_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}/mkldnn/src/mkldnn/include)
+set(mkldnn_URL https://github.com/01org/mkl-dnn.git)
+set(mkldnn_BUILD ${CMAKE_CURRENT_BINARY_DIR}/mkldnn/src/mkldnn/src)
+set(mkldnn_TAG 3063b2e4c943983f6bf5f2fb9a490d4a998cd291)
+
+if(WIN32)
+ if(${CMAKE_GENERATOR} MATCHES "Visual Studio.*")
+ set(mkldnn_STATIC_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/mkldnn/src/mkldnn/src/Release/mkldnn.lib)
+ else()
+ set(mkldnn_STATIC_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/mkldnn/src/mkldnn/src/mkldnn.lib)
+ endif()
+else()
+ set(mkldnn_STATIC_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/mkldnn/src/mkldnn/src/libmkldnn.a)
+endif()
+
+ExternalProject_Add(mkldnn
+ PREFIX mkldnn
+ GIT_REPOSITORY ${mkldnn_URL}
+ GIT_TAG ${mkldnn_TAG}
+ DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
+ BUILD_IN_SOURCE 1
+ BUILD_BYPRODUCTS ${mkldnn_STATIC_LIBRARIES}
+ INSTALL_COMMAND ""
+ CMAKE_CACHE_ARGS
+ -DCMAKE_BUILD_TYPE:STRING=Release
+ -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
+ -DMKLINC:STRING=${MKL_INCLUDE_DIRS}
+)
diff --git a/tensorflow/core/common_runtime/mkl_cpu_allocator.h b/tensorflow/core/common_runtime/mkl_cpu_allocator.h
index b2ef51d10b..245320c896 100644
--- a/tensorflow/core/common_runtime/mkl_cpu_allocator.h
+++ b/tensorflow/core/common_runtime/mkl_cpu_allocator.h
@@ -31,6 +31,10 @@ limitations under the License.
#include "i_malloc.h"
+#ifdef _WIN32
+typedef unsigned int uint;
+#endif
+
namespace tensorflow {
class MklSubAllocator : public SubAllocator {
diff --git a/tensorflow/core/graph/mkl_tfconversion_pass.h b/tensorflow/core/graph/mkl_tfconversion_pass.h
index 0562d8b3cd..84e50ee6e0 100644
--- a/tensorflow/core/graph/mkl_tfconversion_pass.h
+++ b/tensorflow/core/graph/mkl_tfconversion_pass.h
@@ -24,6 +24,10 @@ limitations under the License.
#include <memory>
#include "tensorflow/core/graph/graph.h"
+#ifdef _WIN32
+typedef unsigned int uint;
+#endif
+
namespace tensorflow {
// Interface to invoke the pass for unit test
//
diff --git a/tensorflow/core/kernels/mkl_relu_op.cc b/tensorflow/core/kernels/mkl_relu_op.cc
index 0a0f69522f..1ed43834dd 100644
--- a/tensorflow/core/kernels/mkl_relu_op.cc
+++ b/tensorflow/core/kernels/mkl_relu_op.cc
@@ -441,7 +441,9 @@ class MklReluOpBase : public OpKernel {
// Allocate output and MklDnnShape tensors separately for possible
// in-place operation
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
- {src_index}, dst_index, tf_shape_dst, &dst_tensor));
+ {static_cast<const int>(src_index)},
+ static_cast<const int>(dst_index),
+ tf_shape_dst, &dst_tensor));
AllocateOutputSetMklShape(context, dst_index, dnn_shape_dst);
// Destination memory descriptor is same as source memory descriptor.
@@ -611,7 +613,9 @@ class MklReluGradOpBase : public OpKernel {
// Allocate diff_src and MklDnnShape tensors separately for possible
// in-place operation
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
- {diff_dst_index}, diff_src_index, tf_shape_diff_src,
+ {static_cast<const int>(diff_dst_index)},
+ static_cast<const int>(diff_src_index),
+ tf_shape_diff_src,
&diff_src_tensor));
AllocateOutputSetMklShape(context, diff_src_index, dnn_shape_diff_src);
diff --git a/tensorflow/core/util/mkl_util.h b/tensorflow/core/util/mkl_util.h
index 9f58e40d94..bc6d2d77a4 100644
--- a/tensorflow/core/util/mkl_util.h
+++ b/tensorflow/core/util/mkl_util.h
@@ -45,6 +45,10 @@ using mkldnn::primitive;
using mkldnn::reorder;
#endif
+#ifdef _WIN32
+typedef unsigned int uint;
+#endif
+
// The file contains a number of utility classes and functions used by MKL
// enabled kernels