author     A. Unique TensorFlower <gardener@tensorflow.org>    2018-08-10 22:34:17 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>     2018-08-10 22:38:34 -0700
commit     a8e78e2e617b6ca10f4878fe99fdf43ddedfa7c6 (patch)
tree       d6d7d12dc263afbd4faadc034753c498f574fc93 /tensorflow
parent     f97e945914e83bed8cc2d51a27f1394719f0e7b0 (diff)
Rename MKL-related feature macros.
The existing feature macros are named INTEL_MKL to indicate that any flavor of MKL is available, INTEL_MKL_ML to indicate that *only* MKL-ML is available (i.e., MKL-DNN is not), and DO_NOT_USE_ML to indicate that *only* MKL-DNN is available (i.e., MKL-ML is not).

This change renames INTEL_MKL_ML to INTEL_MKL_ML_ONLY and DO_NOT_USE_ML to INTEL_MKL_DNN_ONLY. The meanings of the macros have not changed.

This change also adds a few sanity checks to mkl_util.h that ensure that the combination of INTEL_MKL, INTEL_MKL_ML_ONLY, and INTEL_MKL_DNN_ONLY is logically consistent: the *_ONLY macros may not both be defined, and if either of them is defined, bare INTEL_MKL must also be defined.

PiperOrigin-RevId: 208313735
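For reference, a minimal standalone restatement of the consistency checks this change adds to tensorflow/core/util/mkl_util.h (see the hunk below); the configuration table in the comment is inferred from the macro descriptions above:

    /* Valid configurations after this change:
     *   INTEL_MKL                       -> both MKL-ML and MKL-DNN available
     *   INTEL_MKL + INTEL_MKL_ML_ONLY   -> only MKL-ML available
     *   INTEL_MKL + INTEL_MKL_DNN_ONLY  -> only MKL-DNN available
     */
    #if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY)
    #ifndef INTEL_MKL
    #error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL"
    #endif
    #endif

    #if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY)
    #error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined"
    #endif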
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc  |  2
-rw-r--r--  tensorflow/core/common_runtime/mkl_cpu_allocator.h  |  4
-rw-r--r--  tensorflow/core/graph/mkl_layout_pass.cc  |  6
-rw-r--r--  tensorflow/core/graph/mkl_layout_pass_test.cc  |  6
-rw-r--r--  tensorflow/core/kernels/batch_matmul_op_complex.cc  |  2
-rw-r--r--  tensorflow/core/kernels/batch_matmul_op_real.cc  |  2
-rw-r--r--  tensorflow/core/kernels/matmul_op.cc  |  4
-rw-r--r--  tensorflow/core/kernels/mkl_aggregate_ops.cc  |  7
-rw-r--r--  tensorflow/core/kernels/mkl_avgpooling_op.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_batch_matmul_op.cc  |  2
-rw-r--r--  tensorflow/core/kernels/mkl_concat_op.cc  |  7
-rw-r--r--  tensorflow/core/kernels/mkl_conv_grad_bias_ops.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc  |  9
-rw-r--r--  tensorflow/core/kernels/mkl_conv_grad_input_ops.cc  |  10
-rw-r--r--  tensorflow/core/kernels/mkl_conv_ops.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_conv_ops.h  |  7
-rw-r--r--  tensorflow/core/kernels/mkl_fused_batch_norm_op.cc  |  7
-rw-r--r--  tensorflow/core/kernels/mkl_identity_op.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_input_conversion_op.cc  |  4
-rw-r--r--  tensorflow/core/kernels/mkl_lrn_op.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_matmul_op.cc  |  8
-rw-r--r--  tensorflow/core/kernels/mkl_maxpooling_op.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_pooling_ops_common.cc  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_pooling_ops_common.h  |  8
-rw-r--r--  tensorflow/core/kernels/mkl_relu_op.cc  |  12
-rw-r--r--  tensorflow/core/kernels/mkl_reshape_op.cc  |  7
-rw-r--r--  tensorflow/core/kernels/mkl_softmax_op.cc  |  4
-rw-r--r--  tensorflow/core/kernels/mkl_tfconv_op.h  |  6
-rw-r--r--  tensorflow/core/kernels/mkl_transpose_op.cc  |  20
-rw-r--r--  tensorflow/core/ops/nn_ops.cc  |  10
-rw-r--r--  tensorflow/core/util/mkl_util.h  |  48
-rw-r--r--  tensorflow/core/util/mkl_util_test.cc  |  4
-rw-r--r--  tensorflow/tensorflow.bzl  |  2
33 files changed, 125 insertions, 125 deletions
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc b/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc
index 997fdd2ab3..8dc5f3c93b 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#if defined(INTEL_MKL) && !defined(DO_NOT_USE_ML)
+#if defined(INTEL_MKL) && !defined(INTEL_MKL_DNN_ONLY)
#include "tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.h"
#include "third_party/intel_mkl_ml/include/mkl_cblas.h"
#include "third_party/intel_mkl_ml/include/mkl_service.h"
diff --git a/tensorflow/core/common_runtime/mkl_cpu_allocator.h b/tensorflow/core/common_runtime/mkl_cpu_allocator.h
index 94e10dbfa2..99bd43e090 100644
--- a/tensorflow/core/common_runtime/mkl_cpu_allocator.h
+++ b/tensorflow/core/common_runtime/mkl_cpu_allocator.h
@@ -28,7 +28,7 @@ limitations under the License.
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/mem.h"
-#ifndef DO_NOT_USE_ML
+#ifndef INTEL_MKL_DNN_ONLY
#include "i_malloc.h"
#endif
@@ -98,7 +98,7 @@ class MklCPUAllocator : public VisitableAllocator {
VLOG(1) << "MklCPUAllocator: Setting max_mem_bytes: " << max_mem_bytes;
allocator_ = new BFCAllocator(new MklSubAllocator, max_mem_bytes,
kAllowGrowth, kName);
-#ifndef DO_NOT_USE_ML
+#ifndef INTEL_MKL_DNN_ONLY
// For redirecting all allocations from MKL to this allocator
// From: http://software.intel.com/en-us/node/528565
i_malloc = MallocHook;
diff --git a/tensorflow/core/graph/mkl_layout_pass.cc b/tensorflow/core/graph/mkl_layout_pass.cc
index c22e0a3872..5683944e46 100644
--- a/tensorflow/core/graph/mkl_layout_pass.cc
+++ b/tensorflow/core/graph/mkl_layout_pass.cc
@@ -43,7 +43,7 @@ limitations under the License.
namespace tensorflow {
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// This pass implements rewriting of graph to support following scenarios:
// (A) Merging nodes in the graph
@@ -2211,7 +2211,7 @@ Status MklLayoutRewritePass::Run(const GraphOptimizationPassOptions& options) {
return Status::OK();
}
-#else // INTEL_MKL_ML
+#else // INTEL_MKL_ML_ONLY
// This pass implements rewriting of graph to support following scenarios:
// (A) Merging nodes in the graph
@@ -4474,7 +4474,7 @@ Status MklLayoutRewritePass::Run(const GraphOptimizationPassOptions& options) {
return Status::OK();
}
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
} // namespace tensorflow
#endif
diff --git a/tensorflow/core/graph/mkl_layout_pass_test.cc b/tensorflow/core/graph/mkl_layout_pass_test.cc
index a41f5861af..e8bac847e5 100644
--- a/tensorflow/core/graph/mkl_layout_pass_test.cc
+++ b/tensorflow/core/graph/mkl_layout_pass_test.cc
@@ -37,7 +37,7 @@ limitations under the License.
namespace tensorflow {
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
namespace {
@@ -1898,7 +1898,7 @@ BENCHMARK(BM_MklLayoutRewritePass)->Arg(1000)->Arg(10000);
} // namespace
-#else // INTEL_MKL_ML
+#else // INTEL_MKL_ML_ONLY
// NOTE: Unit tests in this file rely on a topological sorted graph for
// printing. But since sibling nodes of a node in the topologically sorted graph
@@ -3582,7 +3582,7 @@ BENCHMARK(BM_MklLayoutRewritePass)->Arg(1000)->Arg(10000);
} // namespace
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/batch_matmul_op_complex.cc b/tensorflow/core/kernels/batch_matmul_op_complex.cc
index b77c80c01f..54c45bfe63 100644
--- a/tensorflow/core/kernels/batch_matmul_op_complex.cc
+++ b/tensorflow/core/kernels/batch_matmul_op_complex.cc
@@ -17,7 +17,7 @@ limitations under the License.
namespace tensorflow {
-#if !defined(INTEL_MKL) || defined(DO_NOT_USE_ML)
+#if !defined(INTEL_MKL) || defined(INTEL_MKL_DNN_ONLY)
TF_CALL_complex64(REGISTER_BATCH_MATMUL_CPU);
TF_CALL_complex128(REGISTER_BATCH_MATMUL_CPU);
#endif
diff --git a/tensorflow/core/kernels/batch_matmul_op_real.cc b/tensorflow/core/kernels/batch_matmul_op_real.cc
index aa7a2752e8..584b507c70 100644
--- a/tensorflow/core/kernels/batch_matmul_op_real.cc
+++ b/tensorflow/core/kernels/batch_matmul_op_real.cc
@@ -21,7 +21,7 @@ limitations under the License.
namespace tensorflow {
-#if !defined(INTEL_MKL) || defined(DO_NOT_USE_ML)
+#if !defined(INTEL_MKL) || defined(INTEL_MKL_DNN_ONLY)
TF_CALL_float(REGISTER_BATCH_MATMUL_CPU);
TF_CALL_double(REGISTER_BATCH_MATMUL_CPU);
#endif
diff --git a/tensorflow/core/kernels/matmul_op.cc b/tensorflow/core/kernels/matmul_op.cc
index 5d4737549b..79967aab38 100644
--- a/tensorflow/core/kernels/matmul_op.cc
+++ b/tensorflow/core/kernels/matmul_op.cc
@@ -598,11 +598,11 @@ TF_CALL_float(REGISTER_CPU_EIGEN);
// to use only opensource MKL DNN then use default implementation for these
// types otherwise use GEMM from MKL ML binary
-#if defined(DO_NOT_USE_ML)
+#if defined(INTEL_MKL_DNN_ONLY)
TF_CALL_complex64(REGISTER_CPU);
TF_CALL_complex128(REGISTER_CPU);
TF_CALL_double(REGISTER_CPU);
-#else // DO_NOT_USE_ML
+#else // INTEL_MKL_DNN_ONLY
TF_CALL_complex64(REGISTER_CPU_EIGEN);
TF_CALL_complex128(REGISTER_CPU_EIGEN);
TF_CALL_double(REGISTER_CPU_EIGEN);
diff --git a/tensorflow/core/kernels/mkl_aggregate_ops.cc b/tensorflow/core/kernels/mkl_aggregate_ops.cc
index 3d04aeeb3e..28edf51546 100644
--- a/tensorflow/core/kernels/mkl_aggregate_ops.cc
+++ b/tensorflow/core/kernels/mkl_aggregate_ops.cc
@@ -24,8 +24,7 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::stream;
using mkldnn::sum;
@@ -38,7 +37,7 @@ using mkldnn::sum;
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklAddNOp : public OpKernel {
@@ -286,7 +285,7 @@ class MklAddNOp : public OpKernel {
} MklAddNOpContext;
};
-#else // INTEL_MKL_ML
+#else // INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklAddNOp : public OpKernel {
public:
diff --git a/tensorflow/core/kernels/mkl_avgpooling_op.cc b/tensorflow/core/kernels/mkl_avgpooling_op.cc
index d3566c2e37..969baecc51 100644
--- a/tensorflow/core/kernels/mkl_avgpooling_op.cc
+++ b/tensorflow/core/kernels/mkl_avgpooling_op.cc
@@ -24,7 +24,7 @@
#include "tensorflow/core/kernels/mkl_pooling_ops_common.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::algorithm;
using mkldnn::engine;
@@ -40,7 +40,7 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklAvgPoolingOp : public OpKernel {
@@ -664,7 +664,7 @@ class MklAvgPoolingGradOp : public MklPoolingBackwardOpBase<T> {
}
}; // MklAvgPoolingGradOp
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
REGISTER_KERNEL_BUILDER(Name("_MklAvgPool")
.Device(DEVICE_CPU)
diff --git a/tensorflow/core/kernels/mkl_batch_matmul_op.cc b/tensorflow/core/kernels/mkl_batch_matmul_op.cc
index 45328b03d6..0841395dc3 100644
--- a/tensorflow/core/kernels/mkl_batch_matmul_op.cc
+++ b/tensorflow/core/kernels/mkl_batch_matmul_op.cc
@@ -25,7 +25,7 @@ limitations under the License.
#define EIGEN_USE_THREADS
-#if defined(INTEL_MKL) && !defined(DO_NOT_USE_ML)
+#if defined(INTEL_MKL) && !defined(INTEL_MKL_DNN_ONLY)
#include <vector>
#include "mkl_cblas.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
diff --git a/tensorflow/core/kernels/mkl_concat_op.cc b/tensorflow/core/kernels/mkl_concat_op.cc
index d8efb1be3e..8ad7ebb51f 100644
--- a/tensorflow/core/kernels/mkl_concat_op.cc
+++ b/tensorflow/core/kernels/mkl_concat_op.cc
@@ -27,8 +27,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::concat;
@@ -64,7 +63,7 @@ class EigenConcatBaseOp : public OpKernel {
// we need to have empty Compute because Compute is pure virtual function.
void Compute(OpKernelContext* c) {}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
void Compute(OpKernelContext* c, const std::vector<Tensor>& values) {
const Tensor* concat_dim_tensor;
@@ -232,7 +231,7 @@ class EigenConcatBaseOp : public OpKernel {
#endif
};
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// --------------------------------------------------------------------------
// Mkl Concat Op
diff --git a/tensorflow/core/kernels/mkl_conv_grad_bias_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_bias_ops.cc
index f857be6c32..7c687f6581 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_bias_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_bias_ops.cc
@@ -18,7 +18,7 @@ limitations under the License.
// bias.
#ifdef INTEL_MKL
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
#define USE_EIGEN_TENSOR
#define EIGEN_USE_THREADS
@@ -39,7 +39,7 @@ limitations under the License.
#include "tensorflow/core/util/use_cudnn.h"
#include "tensorflow/core/util/work_sharder.h"
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#endif
@@ -265,5 +265,5 @@ class MklConv2DCustomBackpropBiasOp : public OpKernel {
TF_CALL_float(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
} /* namespace tensorflow */
-#endif /* INTEL_MKL_ML */
+#endif /* INTEL_MKL_ML_ONLY */
#endif /* INTEL_MKL */
diff --git a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
index b73a119a88..50c25e1da7 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
@@ -38,8 +38,7 @@ limitations under the License.
#include "tensorflow/core/util/use_cudnn.h"
#include "tensorflow/core/util/work_sharder.h"
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::convolution_backward_weights;
@@ -56,7 +55,7 @@ using mkldnn::stream;
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
struct MklConvBwdFilterParams {
memory::dims src_dims;
@@ -358,7 +357,7 @@ class MklConv2DBwdFilterPrimitiveFactory : public MklPrimitiveFactory<T> {
#endif
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, class T>
class MklConv2DCustomBackpropFilterOp : public OpKernel {
@@ -1050,7 +1049,7 @@ class MklConv2DCustomBackpropFilterOp
TF_CALL_float(REGISTER_MKL_FILTER_KERNELS);
#undef REGISTER_MKL_FILTER_KERNELS
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
index 39498f1a80..38e014d68e 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
@@ -23,7 +23,7 @@ limitations under the License.
#define EIGEN_USE_THREADS
#include <algorithm>
#include <vector>
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#endif
@@ -46,7 +46,7 @@ limitations under the License.
#include "tensorflow/core/util/use_cudnn.h"
#include "tensorflow/core/util/work_sharder.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::convolution_backward_data;
@@ -57,7 +57,7 @@ using mkldnn::stream;
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
/// utility classes enabling primitive reuse for backward conv2d ops.
struct MklConvBwdInputParams {
@@ -294,7 +294,7 @@ class MklConv2DBwdInputPrimitiveFactory : public MklPrimitiveFactory<T> {
#endif
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, class T>
class MklConv2DCustomBackpropInputOp : public OpKernel {
@@ -839,7 +839,7 @@ class MklConv2DCustomBackpropInputOp
}
};
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
#define REGISTER_MKL_CPU_KERNELS(T) \
REGISTER_KERNEL_BUILDER(Name("_MklConv2DBackpropInput") \
diff --git a/tensorflow/core/kernels/mkl_conv_ops.cc b/tensorflow/core/kernels/mkl_conv_ops.cc
index 62396eeb8b..bca1aa21a8 100644
--- a/tensorflow/core/kernels/mkl_conv_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_ops.cc
@@ -42,7 +42,7 @@ limitations under the License.
#include "tensorflow/core/util/mkl_util.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::prop_kind;
@@ -57,7 +57,7 @@ using mkldnn::convolution_direct;
namespace tensorflow {
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// This structure aggregates multiple inputs to Conv2DFwd* methods.
struct MklConvFwdParams {
@@ -329,7 +329,7 @@ class MklConv2DFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
typedef Eigen::ThreadPoolDevice CPUDevice;
// For now, MKL-ML is default. So making MKL-DNN not a default choice.
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T, bool biasEnabled>
class MklConv2DOp : public OpKernel {
public:
diff --git a/tensorflow/core/kernels/mkl_conv_ops.h b/tensorflow/core/kernels/mkl_conv_ops.h
index 3f154ff33b..838c06f49d 100644
--- a/tensorflow/core/kernels/mkl_conv_ops.h
+++ b/tensorflow/core/kernels/mkl_conv_ops.h
@@ -40,7 +40,7 @@ limitations under the License.
#include "tensorflow/core/util/mkl_util.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::prop_kind;
@@ -52,7 +52,7 @@ using mkldnn::convolution_forward;
namespace tensorflow {
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
class MklDnnConvUtil {
protected:
@@ -397,8 +397,7 @@ class MklConv2DBackpropCommonOp : public OpKernel {
TensorFormat data_format_; // NCHW or NHWC
};
-#endif // INTEL_MKL_ML
-
+#endif // INTEL_MKL_ML_ONLY
/////////////////////////////////////////////////////////////////////
/// Dummy Mkl op that is just used for operators that are intermediate
diff --git a/tensorflow/core/kernels/mkl_fused_batch_norm_op.cc b/tensorflow/core/kernels/mkl_fused_batch_norm_op.cc
index aa572fb0a3..2ec6c8fa89 100644
--- a/tensorflow/core/kernels/mkl_fused_batch_norm_op.cc
+++ b/tensorflow/core/kernels/mkl_fused_batch_norm_op.cc
@@ -21,8 +21,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/util/tensor_format.h"
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::batch_normalization_backward;
using mkldnn::batch_normalization_forward;
@@ -41,7 +40,7 @@ using mkldnn::use_scale_shift;
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklFusedBatchNormOp : public OpKernel {
@@ -684,7 +683,7 @@ class MklFusedBatchNormGradOp : public OpKernel {
};
#endif
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
struct MklBatchNormFwdParams {
memory::dims src_dims;
diff --git a/tensorflow/core/kernels/mkl_identity_op.cc b/tensorflow/core/kernels/mkl_identity_op.cc
index b02cc5384c..b57e816028 100644
--- a/tensorflow/core/kernels/mkl_identity_op.cc
+++ b/tensorflow/core/kernels/mkl_identity_op.cc
@@ -24,20 +24,20 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#endif
#include "tensorflow/core/util/mkl_util.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#endif
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklIdentityOp : public OpKernel {
diff --git a/tensorflow/core/kernels/mkl_input_conversion_op.cc b/tensorflow/core/kernels/mkl_input_conversion_op.cc
index dc4da33a06..06ce820ae9 100644
--- a/tensorflow/core/kernels/mkl_input_conversion_op.cc
+++ b/tensorflow/core/kernels/mkl_input_conversion_op.cc
@@ -32,7 +32,7 @@ limitations under the License.
#include "tensorflow/core/kernels/mkl_tfconv_op.h"
#include "tensorflow/core/util/mkl_util.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::stream;
@@ -60,7 +60,7 @@ typedef Eigen::ThreadPoolDevice CPUDevice;
// convert the TF format input to MKL format
///////////////////////////////////////////////////////////
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklInputConversionOp : public OpKernel {
public:
diff --git a/tensorflow/core/kernels/mkl_lrn_op.cc b/tensorflow/core/kernels/mkl_lrn_op.cc
index 7966c271d5..22ff4cd80f 100644
--- a/tensorflow/core/kernels/mkl_lrn_op.cc
+++ b/tensorflow/core/kernels/mkl_lrn_op.cc
@@ -35,7 +35,7 @@ limitations under the License.
#include "tensorflow/core/util/work_sharder.h"
#endif
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::lrn_across_channels;
using mkldnn::lrn_backward;
@@ -69,7 +69,7 @@ void GetBandMatrix(int depth, int depth_radius,
} // namespace
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename T>
class MklLRNOp : public OpKernel {
@@ -1345,7 +1345,7 @@ class MklLRNGradOp : public OpKernel {
float beta_;
};
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
#define REGISTER_MKL_LRN_CPU(T) \
REGISTER_KERNEL_BUILDER(Name("_MklLRN") \
diff --git a/tensorflow/core/kernels/mkl_matmul_op.cc b/tensorflow/core/kernels/mkl_matmul_op.cc
index fd261433a0..077d62ce32 100644
--- a/tensorflow/core/kernels/mkl_matmul_op.cc
+++ b/tensorflow/core/kernels/mkl_matmul_op.cc
@@ -31,7 +31,7 @@ limitations under the License.
#include "tensorflow/core/kernels/fill_functor.h"
// This header file is part of MKL ML, need equivalent file in MKL DNN
-#ifndef DO_NOT_USE_ML
+#ifndef INTEL_MKL_DNN_ONLY
#include "mkl_cblas.h"
#else
#include "mkldnn.h"
@@ -155,7 +155,7 @@ class MklMatMulOp : public OpKernel {
// 1.0 and 0.0 respectively.
const float alpha = 1.0f;
const float beta = 0.0f;
-#if defined(DO_NOT_USE_ML)
+#if defined(INTEL_MKL_DNN_ONLY)
const char* const ftrans[] = {"N", "T", "C"};
int index_transa = transa ? 1 : 0;
int index_transb = transb ? 1 : 0;
@@ -173,7 +173,7 @@ class MklMatMulOp : public OpKernel {
}
// MKLDNN only supports SGEMM
-#ifndef DO_NOT_USE_ML
+#ifndef INTEL_MKL_DNN_ONLY
// Matrix-Matrix Multiplication with FP64 tensors. For detailed info about
// parameters, look at FP32 function description.
@@ -229,7 +229,7 @@ class MklMatMulOp : public OpKernel {
// additional types
TF_CALL_float(REGISTER_CPU);
-#ifndef DO_NOT_USE_ML
+#ifndef INTEL_MKL_DNN_ONLY
TF_CALL_double(REGISTER_CPU);
TF_CALL_complex64(REGISTER_CPU);
TF_CALL_complex128(REGISTER_CPU);
diff --git a/tensorflow/core/kernels/mkl_maxpooling_op.cc b/tensorflow/core/kernels/mkl_maxpooling_op.cc
index 0a2151566e..e149f003e5 100644
--- a/tensorflow/core/kernels/mkl_maxpooling_op.cc
+++ b/tensorflow/core/kernels/mkl_maxpooling_op.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/padding.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include <algorithm>
#include "mkldnn.hpp"
using mkldnn::algorithm;
@@ -40,7 +40,7 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
// MKL-DNN is now default. MKL-ML must be specified explicitly.
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// An implementation of MaxPooling (forward).
template <typename Device, typename T>
@@ -817,7 +817,7 @@ class MklMaxPoolingGradOp : public MklPoolingBackwardOpBase<T> {
}
}; // MklMaxPoolingGradOp
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
REGISTER_KERNEL_BUILDER(Name("_MklMaxPool")
.Device(DEVICE_CPU)
diff --git a/tensorflow/core/kernels/mkl_pooling_ops_common.cc b/tensorflow/core/kernels/mkl_pooling_ops_common.cc
index 915878d9ea..d7ad3f9dcd 100644
--- a/tensorflow/core/kernels/mkl_pooling_ops_common.cc
+++ b/tensorflow/core/kernels/mkl_pooling_ops_common.cc
@@ -223,7 +223,7 @@ void MklPoolParameters::Init(OpKernelContext* context,
Init(context, ksize, stride, padding, data_format);
}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// Initialization for MKL format
void MklPoolParameters::Init(OpKernelContext* context,
const std::vector<int32>& ksize,
@@ -253,7 +253,7 @@ void MklPoolParameters::Init(OpKernelContext* context,
Init(context, ksize, stride, padding, data_format);
}
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
// Common Initialization for TensorFlow and MKL formats
void MklPoolParameters::Init(OpKernelContext* context,
const std::vector<int32>& ksize,
@@ -288,7 +288,7 @@ void MklPoolParameters::Init(OpKernelContext* context,
OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose(
tensor_in_cols, window_cols, col_stride,
padding, &out_width, &pad_left, &pad_right));
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// TF can work with int64, but mkldnn only supports int32
// Fail if the height or width are greater than MAX_INT
diff --git a/tensorflow/core/kernels/mkl_pooling_ops_common.h b/tensorflow/core/kernels/mkl_pooling_ops_common.h
index 3a3de1c58b..ec7af5092d 100644
--- a/tensorflow/core/kernels/mkl_pooling_ops_common.h
+++ b/tensorflow/core/kernels/mkl_pooling_ops_common.h
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/padding.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::memory;
using mkldnn::pooling_backward;
@@ -405,7 +405,7 @@ struct MklPoolParameters {
void Init(OpKernelContext* context, const std::vector<int32>& ksize,
const std::vector<int32>& stride, Padding padding,
TensorFormat data_format, const TensorShape& tensor_in_shape);
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
void Init(OpKernelContext* context, const std::vector<int32>& ksize,
const std::vector<int32>& stride, Padding padding,
TensorFormat data_format, const MklShape* mkl_in_shape);
@@ -422,7 +422,7 @@ struct MklPoolParameters {
TensorFormat data_format);
};
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
template <class T>
class MklPoolingOpBase : public OpKernel {
@@ -674,7 +674,7 @@ class MklPoolingBackwardOpBase : public MklPoolingOpBase<T> {
return grad_reorder_needed ? target_diff_dst_md : original_input_grad_md;
}
};
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
//-------------------------------------------------------------------
// Utility functions
diff --git a/tensorflow/core/kernels/mkl_relu_op.cc b/tensorflow/core/kernels/mkl_relu_op.cc
index 78abbdb730..05034894e5 100644
--- a/tensorflow/core/kernels/mkl_relu_op.cc
+++ b/tensorflow/core/kernels/mkl_relu_op.cc
@@ -23,8 +23,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::algorithm;
@@ -58,7 +57,7 @@ struct MklReluHelpers {
}
};
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
template <typename Device, typename T>
class MklReluOp : public OpKernel {
@@ -368,10 +367,7 @@ void MklReluGradOp<Device, T>::Compute(OpKernelContext* context) {
mkl_context.MklCleanup();
}
-
-
-#else // INTEL_MKL_ML
-
+#else // INTEL_MKL_ML_ONLY
template <typename Device, typename T, algorithm alg_kind>
class MklReluOpBase : public OpKernel {
@@ -874,7 +870,7 @@ class MklTanhGradOp : public MklReluGradOpBase<Device, T, eltwise_tanh> {
MklReluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES);
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// register dnn kernels for supported operations and supported types
#define REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES(type) \
diff --git a/tensorflow/core/kernels/mkl_reshape_op.cc b/tensorflow/core/kernels/mkl_reshape_op.cc
index 9c536df215..d9a7893a53 100644
--- a/tensorflow/core/kernels/mkl_reshape_op.cc
+++ b/tensorflow/core/kernels/mkl_reshape_op.cc
@@ -24,8 +24,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
using mkldnn::stream;
#else
@@ -42,7 +41,7 @@ class MklReshapeOp : public OpKernel {
public:
explicit MklReshapeOp(OpKernelConstruction* context) : OpKernel(context) {}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
void Compute(OpKernelContext* context) override {
const Tensor& input = MklGetInput(context, 0);
const Tensor& sizes = MklGetInput(context, 1);
@@ -317,7 +316,7 @@ class MklReshapeOp : public OpKernel {
}
}
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
private:
const int kInputSlotIdx = 0;
diff --git a/tensorflow/core/kernels/mkl_softmax_op.cc b/tensorflow/core/kernels/mkl_softmax_op.cc
index 638392954e..8bde966be9 100644
--- a/tensorflow/core/kernels/mkl_softmax_op.cc
+++ b/tensorflow/core/kernels/mkl_softmax_op.cc
@@ -15,7 +15,7 @@ limitations under the License.
// See docs in ../ops/nn_ops.cc.
#ifdef INTEL_MKL
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
@@ -153,5 +153,5 @@ TF_CALL_float(REGISTER_SOFTMAX_MKL_SUPPORTED_KERNELS_TYPES);
} // namespace tensorflow
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
#endif // INTEL_MKL
diff --git a/tensorflow/core/kernels/mkl_tfconv_op.h b/tensorflow/core/kernels/mkl_tfconv_op.h
index a9e92f6638..894c2e34e8 100644
--- a/tensorflow/core/kernels/mkl_tfconv_op.h
+++ b/tensorflow/core/kernels/mkl_tfconv_op.h
@@ -32,13 +32,13 @@ limitations under the License.
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/tensor_format.h"
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#endif
#include "tensorflow/core/util/mkl_util.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
using mkldnn::stream;
#endif
@@ -64,7 +64,7 @@ class MklToTfOp : public OpKernel {
VLOG(1) << "MKLToTFConversion complete successfully.";
}
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
static void ConvertMklToTf(OpKernel* op_kernel, OpKernelContext* context,
string data_format_str, DataType op_data_type,
bool has_avx512f, uint input_number) {
diff --git a/tensorflow/core/kernels/mkl_transpose_op.cc b/tensorflow/core/kernels/mkl_transpose_op.cc
index 109e634feb..6bbe271c54 100644
--- a/tensorflow/core/kernels/mkl_transpose_op.cc
+++ b/tensorflow/core/kernels/mkl_transpose_op.cc
@@ -18,14 +18,14 @@ limitations under the License.
#if defined(INTEL_MKL)
#define EIGEN_USE_THREADS
-#if !defined(DO_NOT_USE_ML)
+#if !defined(INTEL_MKL_DNN_ONLY)
#include "mkl_trans.h"
#endif
#include "tensorflow/core/kernels/transpose_functor.h"
#include "tensorflow/core/kernels/transpose_op.h"
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/util/mkl_util.h"
@@ -50,7 +50,7 @@ namespace tensorflow {
// REQUIRES: perm is a permutation.
namespace {
-#if !defined(DO_NOT_USE_ML)
+#if !defined(INTEL_MKL_DNN_ONLY)
template <typename T>
Status MKLTranspose2D(const char trans, const Tensor& in, Tensor* out);
@@ -104,9 +104,9 @@ Status MKLTranspose2D<complex128>(const char trans, const Tensor& in,
static const char kMKLTranspose = 'T';
static const char kMKLConjugateTranspose = 'C';
-#endif // if !defined(DO_NOT_USE_ML)
+#endif // if !defined(INTEL_MKL_DNN_ONLY)
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// MKL-DNN based Transpose implementation
template <typename T>
Status MKLTransposeND(OpKernelContext* ctx, const Tensor& in, Tensor* out,
@@ -154,14 +154,14 @@ Status MKLTransposeND(OpKernelContext* context, const Tensor& in_tensor,
return errors::Aborted("Operation received an exception:", error_msg);
}
}
-#endif // #ifndef INTEL_MKL_ML
+#endif // #ifndef INTEL_MKL_ML_ONLY
} // namespace
Status MklTransposeCpuOp::DoTranspose(OpKernelContext* ctx, const Tensor& in,
gtl::ArraySlice<int32> perm,
Tensor* out) {
-#if !defined(DO_NOT_USE_ML)
+#if !defined(INTEL_MKL_DNN_ONLY)
if (in.dims() == 2) {
if (perm[0] == 0 && perm[1] == 1) {
return Status::OK();
@@ -181,7 +181,7 @@ Status MklTransposeCpuOp::DoTranspose(OpKernelContext* ctx, const Tensor& in,
}
#endif
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// MKL-DNN has limit on the maximum number of dimensions in a tensor.
// Fallback to Eigen for not supported cases.
if (in.dims() <= TENSOR_MAX_DIMS) {
@@ -206,7 +206,7 @@ Status MklConjugateTransposeCpuOp::DoTranspose(OpKernelContext* ctx,
const Tensor& in,
gtl::ArraySlice<int32> perm,
Tensor* out) {
-#if !defined(DO_NOT_USE_ML)
+#if !defined(INTEL_MKL_DNN_ONLY)
if (in.dims() == 2 && perm[0] == 1 && perm[1] == 0) {
// TODO(rmlarsen): By setting lda and ldb, we could use the MKL kernels
// for any transpose that can be reduced to swapping the last two
@@ -227,7 +227,7 @@ Status MklConjugateTransposeCpuOp::DoTranspose(OpKernelContext* ctx,
}
#endif
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// MKL-DNN has limit on the maximum number of dimensions in a tensor.
// Fallback to Eigen for not supported cases.
if (in.dims() <= TENSOR_MAX_DIMS) {
diff --git a/tensorflow/core/ops/nn_ops.cc b/tensorflow/core/ops/nn_ops.cc
index f947d4c30d..e0f25fb4ef 100644
--- a/tensorflow/core/ops/nn_ops.cc
+++ b/tensorflow/core/ops/nn_ops.cc
@@ -1687,7 +1687,7 @@ NOTE Do not invoke this operator directly in Python. Graph rewrite pass is
expected to invoke these operators.
)doc");
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
REGISTER_OP("_MklConv2DWithBiasBackpropBias")
.Input("out_backprop: T")
.Input("mkl_out_backprop: uint8")
@@ -1849,7 +1849,7 @@ REGISTER_OP("_MklMaxPool")
.Input("input: T")
.Input("mkl_input: uint8")
.Output("output: T")
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
.Output("workspace: T")
#else
.Output("workspace: uint8")
@@ -1875,7 +1875,7 @@ REGISTER_OP("_MklMaxPoolGrad")
.Input("orig_input: T")
.Input("orig_output: T")
.Input("grad: T")
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
.Input("workspace: T")
#else
.Input("workspace: uint8")
@@ -1947,7 +1947,7 @@ REGISTER_OP("_MklLRN")
.Input("input: T")
.Input("mkl_input: uint8")
.Output("output: T")
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
.Output("workspace: T")
#else
.Output("workspace: uint8")
@@ -1975,7 +1975,7 @@ REGISTER_OP("_MklLRNGrad")
.Input("input_grads: T")
.Input("input_image: T")
.Input("output_image: T")
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
.Input("workspace: T")
#else
.Input("workspace: uint8")
diff --git a/tensorflow/core/util/mkl_util.h b/tensorflow/core/util/mkl_util.h
index a66b1215bd..159a787d05 100644
--- a/tensorflow/core/util/mkl_util.h
+++ b/tensorflow/core/util/mkl_util.h
@@ -22,7 +22,17 @@ limitations under the License.
#include <utility>
#include <vector>
-#ifdef INTEL_MKL_ML
+#if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY)
+#ifndef INTEL_MKL
+#error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL"
+#endif
+#endif
+
+#if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY)
+#error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined"
+#endif
+
+#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
@@ -40,7 +50,8 @@ limitations under the License.
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
-#ifndef INTEL_MKL_ML
+
+#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
@@ -76,7 +87,7 @@ typedef enum {
Dim_I = 1
} MklDnnDims;
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
class MklShape {
public:
MklShape() {}
@@ -670,14 +681,13 @@ class MklDnnShape {
// List of MklShape objects. Used in Concat/Split layers.
-
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
for (auto& s : shapes) {
@@ -760,7 +770,7 @@ inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
#endif
// Get the MKL shape from the second string tensor
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
mklshape->DeSerializeMklShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
@@ -795,7 +805,7 @@ inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
ctext->input_list(name, input_tensors);
}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklShapeList* mkl_shapes) {
@@ -825,7 +835,7 @@ inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
#endif
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
@@ -845,7 +855,7 @@ inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
}
#endif
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
@@ -878,7 +888,7 @@ inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
}
#endif
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
@@ -923,7 +933,7 @@ inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
const memory::primitive_desc& pd, void** buf_out) {
@@ -972,7 +982,7 @@ inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
}
}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
inline void MklSizesToTFSizes(OpKernelContext* context,
TensorFormat data_format_,
const MklShape& mkl_shape,
@@ -1016,7 +1026,7 @@ inline int32 GetMklTensorDimIndex(char dimension) {
}
}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
int index = GetMklTensorDimIndex(dimension);
CHECK(index >= 0 && index < mkl_shape.GetDimension())
@@ -1046,7 +1056,7 @@ inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
context->set_output(idx_meta_out, meta_output);
}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
@@ -1084,7 +1094,7 @@ inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
}
#endif
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
@@ -1142,7 +1152,7 @@ inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
}
}
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
@@ -1186,7 +1196,7 @@ inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
}
}
-#ifdef INTEL_MKL_ML
+#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
@@ -1303,7 +1313,7 @@ inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
#endif
// -------------------------------------------------------------------
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
/// Return MKL-DNN data type (memory::data_type) for input type T
///
diff --git a/tensorflow/core/util/mkl_util_test.cc b/tensorflow/core/util/mkl_util_test.cc
index cd1d0713ad..4f837f105d 100644
--- a/tensorflow/core/util/mkl_util_test.cc
+++ b/tensorflow/core/util/mkl_util_test.cc
@@ -22,7 +22,7 @@ limitations under the License.
namespace tensorflow {
namespace {
-#ifndef INTEL_MKL_ML
+#ifndef INTEL_MKL_ML_ONLY
TEST(MklUtilTest, MklDnnTfShape) {
auto cpu_engine = engine(engine::cpu, 0);
@@ -84,7 +84,7 @@ TEST(MklUtilTest, MklDnnBlockedFormatTest) {
EXPECT_EQ(b_md2.data.format, mkldnn_blocked);
}
-#endif // INTEL_MKL_ML
+#endif // INTEL_MKL_ML_ONLY
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index ca9c0d0aae..30dc4313d0 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -230,7 +230,7 @@ def tf_copts(android_optimization_level_override="-O2", is_external=False):
+ if_cuda(["-DGOOGLE_CUDA=1"])
+ if_tensorrt(["-DGOOGLE_TENSORRT=1"])
+ if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"])
- + if_mkl_open_source_only(["-DDO_NOT_USE_ML"])
+ + if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"])
+ if_mkl_lnx_x64(["-fopenmp"])
+ if_android_arm(["-mfpu=neon"])
+ if_linux_x86_64(["-msse3"])