diff options
author | 2018-09-28 13:11:09 -0700 | |
---|---|---|
committer | 2018-09-28 13:11:09 -0700 | |
commit | 66b02558c71405e7e1e92b04648260a4d6cfd0f1 (patch) | |
tree | b4e61d85632db2dfac3f2800cfd8dd9133a9a16e | |
parent | 6d02ee8e581bf5211f362b80175122e3782fb37a (diff) | |
parent | 59a47b7d330a40971bad89f0e8aa282e79e889f1 (diff) |
Merge pull request #22324 from Intel-tensorflow:fix_typo_env
PiperOrigin-RevId: 214983237
-rw-r--r-- | tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc | 2 | ||||
-rw-r--r-- | tensorflow/core/kernels/mkl_conv_grad_input_ops.cc | 2 | ||||
-rw-r--r-- | tensorflow/core/kernels/mkl_conv_ops.cc | 2 | ||||
-rw-r--r-- | tensorflow/core/util/mkl_util.h | 12 |
4 files changed, 9 insertions, 9 deletions
diff --git a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc index 52157ed5fb..f406ad2ab5 100644 --- a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc +++ b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc @@ -853,7 +853,7 @@ class MklConvCustomBackpropFilterOp // MKL DNN allocates large buffers when a conv gradient filter primtive is // created. So we don't cache conv backward primitives when the env - // variable TF_MKL_OPTIMIZE_PRIMITVE_MEMUSE is set to true. + // variable TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE is set to true. bool do_not_cache = MklPrimitiveFactory<T>::IsPrimitiveMemOptEnabled(); conv_bwd_filter = MklConvBwdFilterPrimitiveFactory<T>::Get( convBwdFilterDims, do_not_cache); diff --git a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc index c38c9cc27c..a501ce2c93 100644 --- a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc +++ b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc @@ -713,7 +713,7 @@ class MklConvCustomBackpropInputOp : public MklConvBackpropCommonOp<Device, T> { TFPaddingToMklDnnPadding(this->padding_)); // We don't cache those primitves if the env variable - // TF_MKL_OPTIMIZE_PRIMITVE_MEMUSE is true and if primitve descriptor + // TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE is true and if primitve descriptor // includes potentialy large buffers. MKL DNN allocates buffers // in the following cases // 1. Legacy CPU without AVX512/AVX2, or diff --git a/tensorflow/core/kernels/mkl_conv_ops.cc b/tensorflow/core/kernels/mkl_conv_ops.cc index 184e0cb003..b332edad0a 100644 --- a/tensorflow/core/kernels/mkl_conv_ops.cc +++ b/tensorflow/core/kernels/mkl_conv_ops.cc @@ -901,7 +901,7 @@ class MklConvOp : public OpKernel { // In some cases, primitve descriptor includes potentialy large buffers, // we don't cache those primitves if the env variable - // TF_MKL_OPTIMIZE_PRIMITVE_MEMUSE is true. MKL DNN allocates buffers + // TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE is true. MKL DNN allocates buffers // in the following cases // 1. Legacy CPU without AVX512/AVX2, or // 2. 1x1 convolution with stride != 1 diff --git a/tensorflow/core/util/mkl_util.h b/tensorflow/core/util/mkl_util.h index cf7ffd8149..04aaea4f89 100644 --- a/tensorflow/core/util/mkl_util.h +++ b/tensorflow/core/util/mkl_util.h @@ -2039,8 +2039,8 @@ class MklPrimitiveFactory { /// Fuction to check whether primitive memory optimization is enabled static inline bool IsPrimitiveMemOptEnabled() { bool is_primitive_mem_opt_enabled = true; - TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITVE_MEMUSE", true, - &is_primitive_mem_opt_enabled)); + TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true, - &is_primitive_mem_opt_enabled)); return is_primitive_mem_opt_enabled; } @@ -2095,9 +2095,8 @@ static inline memory::format get_desired_format(int channel, fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c; } else if (port::TestCPUFeature(port::CPUFeature::AVX2) && (channel % 8) == 0) { - fmt_desired = is_2d - ? memory::format::nChw8c - : memory::format::ncdhw; //not support avx2 for 3d yet. + fmt_desired = is_2d ? memory::format::nChw8c : memory::format::ncdhw; // no avx2 support for 3d yet. } else { fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw; } @@ -2209,7 +2208,8 @@ inline primitive FindOrCreateReorder(const memory* from, const memory* to) { // utility function to determine if it is conv 1x1 and stride != 1 // for purpose of temporarily disabling primitive reuse -inline bool IsConv1x1StrideNot1(memory::dims filter_dims, memory::dims strides) { +inline bool IsConv1x1StrideNot1(memory::dims filter_dims, + memory::dims strides) { if (filter_dims.size() != 4 || strides.size() != 2) return false; return ((filter_dims[2] == 1) && (filter_dims[3] == 1) && |