diff options
author | A. Unique TensorFlower <gardener@tensorflow.org> | 2018-08-10 22:34:17 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-08-10 22:38:34 -0700 |
commit | a8e78e2e617b6ca10f4878fe99fdf43ddedfa7c6 (patch) | |
tree | d6d7d12dc263afbd4faadc034753c498f574fc93 /tensorflow/core/util | |
parent | f97e945914e83bed8cc2d51a27f1394719f0e7b0 (diff) |
Rename MKL-related feature macros.
The existing feature macros are named INTEL_MKL to indicate that any flavor of
MKL is available, INTEL_MKL_ML to indicate that *only* MKL-ML is available
(i.e. MKL-DNN is not), and DO_NOT_USE_ML to indicate that *only* MKL-DNN is
available (i.e. MKL-ML is not).
This change renames INTEL_MKL_ML to INTEL_MKL_ML_ONLY and DO_NOT_USE_ML to
INTEL_MKL_DNN_ONLY. The meanings of the macros have not changed.
This change also adds a few sanity checks to mkl_util.h that ensure that the
combination of INTEL_MKL, INTEL_MKL_ML_ONLY, and INTEL_MKL_DNN_ONLY is
logically consistent: the *_ONLY macros may not both be defined, and if either
of them is defined, bare INTEL_MKL must also be defined.
PiperOrigin-RevId: 208313735
Diffstat (limited to 'tensorflow/core/util')
-rw-r--r-- | tensorflow/core/util/mkl_util.h | 48 | ||||
-rw-r--r-- | tensorflow/core/util/mkl_util_test.cc | 4 |
2 files changed, 31 insertions, 21 deletions
diff --git a/tensorflow/core/util/mkl_util.h b/tensorflow/core/util/mkl_util.h index a66b1215bd..159a787d05 100644 --- a/tensorflow/core/util/mkl_util.h +++ b/tensorflow/core/util/mkl_util.h @@ -22,7 +22,17 @@ limitations under the License. #include <utility> #include <vector> -#ifdef INTEL_MKL_ML +#if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY) +#ifndef INTEL_MKL +#error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL" +#endif +#endif + +#if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY) +#error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined" +#endif + +#ifdef INTEL_MKL_ML_ONLY #include "mkl_dnn.h" #include "mkl_dnn_types.h" #include "mkl_service.h" @@ -40,7 +50,8 @@ limitations under the License. #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" -#ifndef INTEL_MKL_ML + +#ifndef INTEL_MKL_ML_ONLY #include "mkldnn.hpp" #include "tensorflow/core/lib/core/stringpiece.h" @@ -76,7 +87,7 @@ typedef enum { Dim_I = 1 } MklDnnDims; -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY class MklShape { public: MklShape() {} @@ -670,14 +681,13 @@ class MklDnnShape { // List of MklShape objects. Used in Concat/Split layers. - -#ifndef INTEL_MKL_ML +#ifndef INTEL_MKL_ML_ONLY typedef std::vector<MklDnnShape> MklDnnShapeList; #else typedef std::vector<MklShape> MklShapeList; #endif -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY // Check if all tensors specified by MklShapes are MKL tensors. 
inline bool AreAllMklTensors(const MklShapeList& shapes) { for (auto& s : shapes) { @@ -760,7 +770,7 @@ inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, #endif // Get the MKL shape from the second string tensor -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) { mklshape->DeSerializeMklShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) @@ -795,7 +805,7 @@ inline void GetMklInputList(OpKernelContext* ctext, StringPiece name, ctext->input_list(name, input_tensors); } -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklShapeList* mkl_shapes) { @@ -825,7 +835,7 @@ inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, #endif -#ifndef INTEL_MKL_ML +#ifndef INTEL_MKL_ML_ONLY /// Get shape of input tensor pointed by 'input_idx' in TensorShape format. /// If the input tensor is in MKL layout, then obtains TensorShape from /// MklShape. @@ -845,7 +855,7 @@ inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) { } #endif -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, @@ -878,7 +888,7 @@ inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, } #endif -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY // Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, @@ -923,7 +933,7 @@ inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, // Allocates a temp tensor and returns the data buffer for temporary storage. 
// Currently -#ifndef INTEL_MKL_ML +#ifndef INTEL_MKL_ML_ONLY template <typename T> inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, const memory::primitive_desc& pd, void** buf_out) { @@ -972,7 +982,7 @@ inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides, } } -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY inline void MklSizesToTFSizes(OpKernelContext* context, TensorFormat data_format_, const MklShape& mkl_shape, @@ -1016,7 +1026,7 @@ inline int32 GetMklTensorDimIndex(char dimension) { } } -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) { int index = GetMklTensorDimIndex(dimension); CHECK(index >= 0 && index < mkl_shape.GetDimension()) @@ -1046,7 +1056,7 @@ inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in, context->set_output(idx_meta_out, meta_output); } -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in, int idx_out, const TensorShape& shape) { @@ -1084,7 +1094,7 @@ inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in, } #endif -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { @@ -1142,7 +1152,7 @@ inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in, } } -#ifndef INTEL_MKL_ML +#ifndef INTEL_MKL_ML_ONLY // Set a dummy MKLDNN shape (called when the output is in TF format) inline void SetDummyMklDnnShapeOutput(OpKernelContext* context, uint32 idx_data_out) { @@ -1186,7 +1196,7 @@ inline void ForwardMklMetaDataInToOut(OpKernelContext* context, } } -#ifdef INTEL_MKL_ML +#ifdef INTEL_MKL_ML_ONLY // Set a dummy MKL shape (called when the output is in TF format) inline void SetDummyMklShapeOutput(OpKernelContext* context, uint32 idx_data_out) { @@ -1303,7 +1313,7 @@ inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) { 
#endif // ------------------------------------------------------------------- -#ifndef INTEL_MKL_ML +#ifndef INTEL_MKL_ML_ONLY /// Return MKL-DNN data type (memory::data_type) for input type T /// diff --git a/tensorflow/core/util/mkl_util_test.cc b/tensorflow/core/util/mkl_util_test.cc index cd1d0713ad..4f837f105d 100644 --- a/tensorflow/core/util/mkl_util_test.cc +++ b/tensorflow/core/util/mkl_util_test.cc @@ -22,7 +22,7 @@ limitations under the License. namespace tensorflow { namespace { -#ifndef INTEL_MKL_ML +#ifndef INTEL_MKL_ML_ONLY TEST(MklUtilTest, MklDnnTfShape) { auto cpu_engine = engine(engine::cpu, 0); @@ -84,7 +84,7 @@ TEST(MklUtilTest, MklDnnBlockedFormatTest) { EXPECT_EQ(b_md2.data.format, mkldnn_blocked); } -#endif // INTEL_MKL_ML +#endif // INTEL_MKL_ML_ONLY } // namespace } // namespace tensorflow |