Diffstat (limited to 'tensorflow/core/kernels/mkl_conv_grad_input_ops.cc')
-rw-r--r--  tensorflow/core/kernels/mkl_conv_grad_input_ops.cc  471
1 file changed, 403 insertions(+), 68 deletions(-)
diff --git a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
index 21b18f9119..b0f7faaa1a 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
@@ -55,9 +55,246 @@ using mkldnn::stream;
#endif
namespace tensorflow {
-
typedef Eigen::ThreadPoolDevice CPUDevice;
+#ifndef INTEL_MKL_ML
+
+/// Utility classes enabling primitive reuse for backward conv2d ops.
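+// All dims are expected in MKL-DNN order (NCHW for data, OIHW for the
+// filter); padding_left/padding_right hold the spatial padding on each
+// side.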
+struct MklConvBwdInputParams {
+ memory::dims diff_src_dims;
+ memory::dims filter_dims;
+ memory::dims diff_dst_dims;
+ memory::dims strides;
+ memory::dims dilations;
+ memory::dims padding_left;
+ memory::dims padding_right;
+ padding_kind padding;
+
+ MklConvBwdInputParams(memory::dims diff_src_dims,
+ memory::dims filter_dims, memory::dims diff_dst_dims,
+ memory::dims strides, memory::dims dilations,
+ memory::dims padding_left, memory::dims padding_right,
+ padding_kind padding) :
+ diff_src_dims(diff_src_dims), filter_dims(filter_dims),
+ diff_dst_dims(diff_dst_dims), strides(strides),
+ dilations(dilations), padding_left(padding_left),
+ padding_right(padding_right), padding(padding) {
+ }
+};
+
+template <typename T>
+class MklConv2DBwdInputPrimitive : public MklPrimitive {
+ public:
+ explicit MklConv2DBwdInputPrimitive(
+ const MklConvBwdInputParams& convBwdInputDims) :
+ cpu_engine_(engine::cpu, 0) {
+ context_.bwd_input_stream.reset(new stream(stream::kind::eager));
+
+ // create conv primitive
+ if (context_.conv_bwd_input == nullptr) {
+ Setup(convBwdInputDims);
+ }
+ }
+ ~MklConv2DBwdInputPrimitive() {}
+
+  // Convolution backward data (input) execution.
+  //   diff_src_data: output data buffer for diff_src
+  //   filter_data: input data buffer of filter (weights)
+  //   diff_dst_data: input data buffer of diff_dst
+  // Bias is not involved in the backward-data computation.
+ void Execute(const T* diff_src_data,
+ const T* filter_data, const T* diff_dst_data) {
+ context_.diff_src_mem->set_data_handle(
+ static_cast<T*>(const_cast<T*>(diff_src_data)));
+ context_.filter_mem->set_data_handle(
+ static_cast<T*>(const_cast<T*>(filter_data)));
+ context_.diff_dst_mem->set_data_handle(
+ static_cast<T*>(const_cast<T*>(diff_dst_data)));
+
+ context_.bwd_input_stream->submit(context_.bwd_input_primitives);
+
+    // Reset data handles back to dummy data.
+ context_.diff_src_mem->set_data_handle(DummyData);
+ context_.filter_mem->set_data_handle(DummyData);
+ context_.diff_dst_mem->set_data_handle(DummyData);
+ return;
+ }
+
+ memory::format GetFilterMemoryFormat() const {
+ return context_.filter_fmt;
+ }
+
+ memory::format GetDiffDstMemoryFormat() const {
+ return context_.diff_dst_fmt;
+ }
+
+ std::shared_ptr<mkldnn::convolution_backward_data::primitive_desc>
+ GetPrimitiveDesc() const {
+ return context_.bwd_input_pd;
+ }
+
+ private:
+ // Primitive reuse context for Conv2D Bwd Input op
+ struct ConvBwdInputContext {
+ // expected memory format for this primitive instance
+ memory::format filter_fmt;
+ memory::format diff_dst_fmt;
+
+ // MKLDNN memory
+ std::shared_ptr<mkldnn::memory> diff_src_mem;
+ std::shared_ptr<mkldnn::memory> filter_mem;
+ std::shared_ptr<mkldnn::memory> diff_dst_mem;
+
+ // convolution primitive
+ std::shared_ptr<mkldnn::convolution_backward_data::primitive_desc>
+ bwd_input_pd;
+ std::shared_ptr<mkldnn::primitive> conv_bwd_input;
+
+    // desc & primitive desc
+ std::shared_ptr<mkldnn::convolution_backward_data::desc> bwd_input_desc;
+ std::shared_ptr<mkldnn::convolution_forward::desc> fwd_desc;
+ std::shared_ptr<mkldnn::convolution_forward::primitive_desc> fwd_pd;
+
+    // memory desc: forward & backward can share the same memory::desc
+ std::shared_ptr<memory::desc> diff_src_md;
+ std::shared_ptr<memory::desc> filter_md;
+ std::shared_ptr<memory::desc> diff_dst_md;
+
+ // MKL pipeline
+ std::shared_ptr<mkldnn::stream> bwd_input_stream;
+ std::vector<mkldnn::primitive> bwd_input_primitives;
+
+ ConvBwdInputContext() :
+ filter_fmt(memory::format::any), diff_dst_fmt(memory::format::any),
+ diff_src_mem(nullptr), filter_mem(nullptr), diff_dst_mem(nullptr),
+ bwd_input_pd(nullptr), conv_bwd_input(nullptr),
+ bwd_input_desc(nullptr), fwd_desc(nullptr), fwd_pd(nullptr),
+ diff_src_md(nullptr), filter_md(nullptr), diff_dst_md(nullptr),
+ bwd_input_stream(nullptr) {
+ }
+ };
+
+ void Setup(const MklConvBwdInputParams& convBwdInputDims) {
+ // create memory descriptors for convolution data w/ no specified format
+ context_.diff_src_md.reset(new memory::desc(
+ {convBwdInputDims.diff_src_dims},
+ MklDnnType<T>(), memory::format::any));
+ context_.filter_md.reset(new memory::desc(
+ {convBwdInputDims.filter_dims},
+ MklDnnType<T>(), memory::format::any));
+ context_.diff_dst_md.reset(new memory::desc(
+ {convBwdInputDims.diff_dst_dims},
+ MklDnnType<T>(), memory::format::any));
+
+ // create convolution primitives
+ context_.bwd_input_desc.reset(new convolution_backward_data::desc(
+ convolution_direct, *context_.diff_src_md, *context_.filter_md,
+ *context_.diff_dst_md, convBwdInputDims.strides,
+ convBwdInputDims.dilations, convBwdInputDims.padding_left,
+ convBwdInputDims.padding_right, convBwdInputDims.padding));
+
+ context_.fwd_desc.reset(new convolution_forward::desc(prop_kind::forward,
+ convolution_direct, *context_.diff_src_md, *context_.filter_md,
+ *context_.diff_dst_md, convBwdInputDims.strides,
+ convBwdInputDims.dilations, convBwdInputDims.padding_left,
+ convBwdInputDims.padding_right, convBwdInputDims.padding));
+
+ context_.fwd_pd.reset(new convolution_forward::primitive_desc(
+ *context_.fwd_desc, cpu_engine_));
+
+    // create backward conv prim desc; MKL-DNN requires the forward
+    // prim desc above as a hint
+ context_.bwd_input_pd.reset(
+ new convolution_backward_data::primitive_desc(
+ *context_.bwd_input_desc, cpu_engine_, *context_.fwd_pd));
+
+    // create memory primitives based on dummy data
+ context_.diff_src_mem.reset(new memory(
+ context_.bwd_input_pd.get()->diff_src_primitive_desc(), DummyData));
+ context_.filter_mem.reset(new memory(
+ context_.bwd_input_pd.get()->weights_primitive_desc(), DummyData));
+ context_.diff_dst_mem.reset(new memory(
+ context_.bwd_input_pd.get()->diff_dst_primitive_desc(), DummyData));
+
+ // store the expected memory format
+ context_.filter_fmt = static_cast<memory::format>(
+ context_.bwd_input_pd.get()->weights_primitive_desc().desc().data.format);
+ context_.diff_dst_fmt = static_cast<memory::format>(
+ context_.bwd_input_pd.get()->diff_dst_primitive_desc().desc().data.format);
+
+ // create convolution primitive and add it to net
+ context_.conv_bwd_input.reset(new convolution_backward_data(
+ *context_.bwd_input_pd, *context_.diff_dst_mem,
+ *context_.filter_mem, *context_.diff_src_mem));
+
+ context_.bwd_input_primitives.push_back(*context_.conv_bwd_input);
+ }
+
+ struct ConvBwdInputContext context_;
+ engine cpu_engine_;
+};
+
+template <typename T>
+class MklConv2DBwdInputPrimitiveFactory : public MklPrimitiveFactory<T> {
+ private:
+ MklConv2DBwdInputPrimitiveFactory() {}
+ ~MklConv2DBwdInputPrimitiveFactory() {}
+
+ public:
+ static MklConv2DBwdInputPrimitive<T>* Get(
+ const MklConvBwdInputParams& convBwdInputDims) {
+ MklConv2DBwdInputPrimitive<T>* conv2d_bwd_input = nullptr;
+
+    // Look in the pool for a reusable primitive.
+ conv2d_bwd_input = dynamic_cast<MklConv2DBwdInputPrimitive<T>*> (
+ MklConv2DBwdInputPrimitiveFactory<T>::GetInstance().GetConv2dBwdInput(
+ convBwdInputDims));
+
+ if (conv2d_bwd_input == nullptr) {
+ conv2d_bwd_input = new MklConv2DBwdInputPrimitive<T>(
+ convBwdInputDims);
+ MklConv2DBwdInputPrimitiveFactory<T>::GetInstance().SetConv2dBwdInput(
+ convBwdInputDims, conv2d_bwd_input);
+ }
+ return conv2d_bwd_input;
+ }
+
+ private:
+ static MklConv2DBwdInputPrimitiveFactory& GetInstance() {
+ static MklConv2DBwdInputPrimitiveFactory instance_;
+ return instance_;
+ }
+
+ static std::string CreateKey(
+ const MklConvBwdInputParams& convBwdInputDims) {
+ std::string prefix = "conv2d_bwd_input";
+ FactoryKeyCreator key_creator;
+ key_creator.AddAsKey(prefix);
+ key_creator.AddAsKey(convBwdInputDims.diff_src_dims);
+ key_creator.AddAsKey(convBwdInputDims.filter_dims);
+ key_creator.AddAsKey(convBwdInputDims.diff_dst_dims);
+ key_creator.AddAsKey(convBwdInputDims.strides);
+ key_creator.AddAsKey(convBwdInputDims.dilations);
+ key_creator.AddAsKey(convBwdInputDims.padding_left);
+ key_creator.AddAsKey(convBwdInputDims.padding_right);
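+    // Note: the padding kind is not part of the key; padding_left and
+    // padding_right are assumed to determine the padding uniquely.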
+ return key_creator.GetKey();
+ }
+
+ MklPrimitive* GetConv2dBwdInput(
+ const MklConvBwdInputParams& convBwdInputDims) {
+ std::string key = CreateKey(convBwdInputDims);
+ return this->GetOp(key);
+ }
+
+ void SetConv2dBwdInput(
+ const MklConvBwdInputParams& convBwdInputDims, MklPrimitive *op) {
+ std::string key = CreateKey(convBwdInputDims);
+ this->SetOp(key, op);
+ }
+};
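+
+// A minimal usage sketch (hypothetical shapes) for the factory above;
+// repeated calls with identical convolution dimensions return the same
+// cached primitive:
+//
+//   MklConvBwdInputParams params(
+//       {1, 3, 32, 32},   // diff_src dims (NCHW)
+//       {16, 3, 3, 3},    // filter dims (OIHW)
+//       {1, 16, 30, 30},  // diff_dst dims (NCHW)
+//       {1, 1}, {0, 0},   // strides; dilations (MKL-DNN, 0-based)
+//       {0, 0}, {0, 0},   // padding_left, padding_right
+//       padding_kind::zero);
+//   auto* prim = MklConv2DBwdInputPrimitiveFactory<float>::Get(params);
+//   prim->Execute(diff_src_data, filter_data, diff_dst_data);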
+
+#endif
+
#ifdef INTEL_MKL_ML
template <typename Device, class T>
@@ -365,13 +602,168 @@ class MklConv2DCustomBackpropInputOp
: public MklConv2DBackpropCommonOp<Device, T> {
public:
explicit MklConv2DCustomBackpropInputOp(OpKernelConstruction* context)
- : MklConv2DBackpropCommonOp<Device, T>(context) {}
+ : MklConv2DBackpropCommonOp<Device, T>(context) {
+ }
+
~MklConv2DCustomBackpropInputOp() {}
+ void Compute(OpKernelContext* context) {
+ try {
+ MklDnnData<T> filter(&cpu_engine);
+ MklDnnData<T> diff_dst(&cpu_engine);
+
+ // Input tensors
+ const int kInputIdx = 0, kFilterIdx = 1, kOutbpropIdx = 2;
+ const Tensor& src_tensor = MklGetInput(context, kInputIdx);
+ const Tensor& filter_tensor = MklGetInput(context, kFilterIdx);
+ const Tensor& diff_dst_tensor = MklGetInput(context, kOutbpropIdx);
+
+ MklDnnShape src_mkl_shape, filter_mkl_shape, diff_dst_mkl_shape;
+ GetMklShape(context, kInputIdx, &src_mkl_shape);
+ GetMklShape(context, kFilterIdx, &filter_mkl_shape);
+ GetMklShape(context, kOutbpropIdx, &diff_dst_mkl_shape);
+ // Allow operator-specific sanity checking of shapes.
+ ValidateMklShapes(src_mkl_shape, filter_mkl_shape,
+ diff_dst_mkl_shape);
+
+ // Allow operator-specific generation of shapes.
+      // E.g., Conv2DBackpropInput gets its input as input_sizes: a 1D
+      // tensor containing the shape of the original input. So
+      // input.shape() is not the correct way to get the input shape.
+      // These operator-specific calls allow this class to handle
+      // such cases.
+ TensorShape src_tf_shape = MakeInputTfShape(context, src_tensor);
+ TensorShape filter_tf_shape = MakeFilterTfShape(context, filter_tensor);
+ TensorShape diff_dst_tf_shape = GetTfShape(context, kOutbpropIdx);
+
+ // Corner cases: output with 0 elements and 0 batch size.
+ Tensor* diff_src_tensor = nullptr;
+ if (src_tf_shape.num_elements() == 0 ||
+ filter_tf_shape.num_elements() == 0 ||
+ diff_dst_tf_shape.num_elements() == 0) {
+ MklDnnShape diff_src_mkl_shape;
+ diff_src_mkl_shape.SetMklTensor(false);
+ TensorShape diff_src_tf_shape = GetOutputTfShape(
+ src_tf_shape, filter_tf_shape, diff_dst_tf_shape);
+ const int kOutputIdx = 0;
+ AllocateOutputSetMklShape(context, kOutputIdx, &diff_src_tensor,
+ diff_src_tf_shape, diff_src_mkl_shape);
+ CHECK_NOTNULL(diff_src_tensor);
+
+        // If the output tensor has more than 0 elements, zero it out.
+ auto diff_src_data = diff_src_tensor->flat<T>().data();
+ for (size_t i = 0; i < diff_src_tf_shape.num_elements(); ++i) {
+ diff_src_data[i] = 0;
+ }
+ return;
+ }
+      // By default, all dims are in MKL order. Only dims with the
+      // postfix tf_order are in TF order.
+ memory::dims diff_dst_dims, fwd_src_dims, fwd_filter_dims;
+ memory::dims padding_left, padding_right, dilations, strides;
+ memory::dims fwd_output_dims, fwd_output_dims_tf_order;
+
+ // Get forward convolution parameters.
+ MklDnnConvUtil conv_utl(context, this->strides_, this->padding_,
+ this->data_format_, this->dilations_);
+ conv_utl.GetConvFwdSizesInMklOrder(
+ src_tf_shape, filter_tf_shape, &fwd_src_dims, &fwd_filter_dims,
+ &strides, &dilations, &fwd_output_dims_tf_order, &fwd_output_dims,
+ &padding_left, &padding_right);
+ if (!context->status().ok()) return;
+
+ // Create Convolution forward descriptor since Convolution backward
+ // API needs it. For that, we first need to create input, filter
+ // and output memory descriptors.
+ auto tf_fmt = TFDataFormatToMklDnnDataFormat(this->data_format_);
+
+ // If filter is in MKL layout, then simply grab filter layout;
+ // otherwise, construct filter in TF layout.
+ // For TF layout, filter is in HWIO format.
+ auto fwd_filter_md = filter_mkl_shape.IsMklTensor()
+ ? filter_mkl_shape.GetMklLayout()
+ : memory::desc(fwd_filter_dims, MklDnnType<T>(),
+ memory::format::hwio);
+
+ conv_utl.GetInputSizeInMklOrder(diff_dst_tf_shape, &diff_dst_dims);
+ if (!context->status().ok()) return;
+ auto diff_dst_md = diff_dst_mkl_shape.IsMklTensor()
+ ? diff_dst_mkl_shape.GetMklLayout()
+ : memory::desc(diff_dst_dims,
+ MklDnnType<T>(), tf_fmt);
+
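+      // TF dilations are 1-based while MKL-DNN dilations are 0-based
+      // (0 means no dilation), so subtract one in each dimension.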
+ dilations[kDilationH] -= 1;
+ dilations[kDilationW] -= 1;
+
+      MklConvBwdInputParams convBwdInputDims(fwd_src_dims, fwd_filter_dims,
+          diff_dst_dims, strides, dilations, padding_left, padding_right,
+          TFPaddingToMklDnnPadding(this->padding_));
+      MklConv2DBwdInputPrimitive<T>* conv2d_bwd_input =
+          MklConv2DBwdInputPrimitiveFactory<T>::Get(convBwdInputDims);
+ auto bwd_input_pd = conv2d_bwd_input->GetPrimitiveDesc();
+
+      // Allocate the output tensor.
+ auto diff_src_pd = bwd_input_pd->diff_src_primitive_desc();
+ auto bwd_diff_src_dims = GetOutputDims(fwd_src_dims, fwd_filter_dims);
+ auto bwd_diff_src_format = GetOutputFormat(tf_fmt);
+ MklDnnShape diff_src_mkl_shape;
+ diff_src_mkl_shape.SetMklTensor(true);
+ diff_src_mkl_shape.SetMklLayout(&diff_src_pd);
+ diff_src_mkl_shape.SetElemType(MklDnnType<T>());
+ diff_src_mkl_shape.SetTfLayout(bwd_diff_src_dims.size(),
+ bwd_diff_src_dims, bwd_diff_src_format);
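+      // The output tensor is allocated as a flat buffer whose size is
+      // taken from the primitive descriptor; the logical shape lives in
+      // diff_src_mkl_shape.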
+ TensorShape diff_src_tf_shape;
+ diff_src_tf_shape.AddDim(diff_src_pd.get_size() / sizeof(T));
+ AllocateOutputSetMklShape(context, 0, &diff_src_tensor,
+ diff_src_tf_shape, diff_src_mkl_shape);
+
+ T *diff_src_data = static_cast<T*>(const_cast<T*>(
+ diff_src_tensor->flat<T>().data()));
+
+      // Check whether filter and diff_dst need to be reordered.
+ T* filter_data = nullptr;
+ if (fwd_filter_md.data.format !=
+ conv2d_bwd_input->GetFilterMemoryFormat()) {
+ filter.SetUsrMem(fwd_filter_md, &filter_tensor);
+ filter.CheckReorderToOpMem(bwd_input_pd->weights_primitive_desc());
+ filter_data = static_cast<T*>(filter.GetOpMem().get_data_handle());
+ } else {
+ filter_data = static_cast<T*>(const_cast<T*>(
+ filter_tensor.flat<T>().data()));
+ }
+
+ T* diff_dst_data = nullptr;
+ if (diff_dst_md.data.format !=
+ conv2d_bwd_input->GetDiffDstMemoryFormat()) {
+ diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
+ diff_dst.CheckReorderToOpMem(bwd_input_pd->diff_dst_primitive_desc());
+ diff_dst_data = static_cast<T*>(
+ diff_dst.GetOpMem().get_data_handle());
+ } else {
+ diff_dst_data = static_cast<T*>(const_cast<T*>(
+ diff_dst_tensor.flat<T>().data()));
+ }
+
+      // Execute the backward-input convolution.
+ conv2d_bwd_input->Execute(diff_src_data, filter_data, diff_dst_data);
+ } catch (mkldnn::error& e) {
+ string error_msg = "Status: " + std::to_string(e.status) +
+ ", message: " + string(e.message) + ", in file " +
+ string(__FILE__) + ":" + std::to_string(__LINE__);
+ OP_REQUIRES_OK(
+ context,
+ errors::Aborted("Operation received an exception:", error_msg));
+ }
+ }
+
private:
- const int kInputIndex_Filter = 1, kInputIndex_InputSizes = 0,
- kInputIndex_OutBackProp = 2;
+ const int kInputIndex_Filter = 1, kInputIndex_InputSizes = 0;
const int kDilationH = 0, kDilationW = 1;
+ engine cpu_engine = engine(engine::cpu, 0);
+
+  // Validate input shapes. Asserts that the input shapes are valid;
+  // in particular, the input must not be in MKL layout.
void ValidateMklShapes(const MklDnnShape& input_mkl_shape,
const MklDnnShape& filter_mkl_shape,
const MklDnnShape& obp_mkl_shape) {
@@ -382,8 +774,7 @@ class MklConv2DCustomBackpropInputOp
<< "Conv2DBackpropInput: input should not be in MKL Layout";
}
- size_t GetInputTensorIndexWithSizes() { return kInputIndex_InputSizes; }
-
+ // Get TensorFlow shape of input tensor.
TensorShape MakeInputTfShape(OpKernelContext* context,
const Tensor& input_tensor) {
TensorShape input_tf_shape;
@@ -395,72 +786,32 @@ class MklConv2DCustomBackpropInputOp
return input_tf_shape;
}
+ // Get TensorFlow shape of filter tensor.
TensorShape MakeFilterTfShape(OpKernelContext* context,
const Tensor& filter_tensor) {
return GetTfShape(context, kInputIndex_Filter);
}
+  // Get the TensorFlow shape of the output (diff_src), which is the
+  // same as the shape of the Conv2D 'input'.
TensorShape GetOutputTfShape(const TensorShape& input_shape,
const TensorShape& filter_shape,
const TensorShape& outbprop_shape) {
- // Output Shape of Conv2DBackpropInput is same as shape of Conv2D 'input'.
return input_shape;
}
+  // Get the MKL-DNN dims of the output (diff_src), which are the same
+  // as the dims of the Conv2D 'input'.
const memory::dims& GetOutputDims(const memory::dims& fwd_input_dims,
const memory::dims& fwd_filter_dims) {
- // Output Shape of Conv2DBackpropInput is same as shape of Conv2D 'input'.
return fwd_input_dims;
}
+  // Output layout is TensorFlow's layout in data format order.
memory::format GetOutputFormat(const memory::format data_format) {
- // Output layout is Tensorflow's layout in data format order.
return data_format;
}
- void CreatePrimitive(OpKernelContext* context, const engine& cpu_engine,
- const convolution_forward::primitive_desc& conv_fwd_pd,
- MklDnnData<T>* input, MklDnnData<T>* filter,
- MklDnnData<T>* outbackprop, MklDnnData<T>* output,
- Tensor** output_tensor,
- const memory::dims& strides,
- const memory::dims& dilations,
- const memory::dims& padding_l,
- const memory::dims& padding_r, padding_kind padding,
- const memory::dims& bwd_output_dims,
- memory::format bwd_output_format) {
- CHECK_NOTNULL(context);
- CHECK_NOTNULL(input);
- CHECK_NOTNULL(filter);
- CHECK_NOTNULL(outbackprop);
- CHECK_NOTNULL(output);
- CHECK_NOTNULL(output_tensor);
-
- // Create convolution backward data primitive.
- // Use dilated convolution in case dilate rates are greater than zero.
- auto bwd_desc = (dilations[kDilationH] > 0 || dilations[kDilationW] > 0) ?
- convolution_backward_data::desc(convolution_direct,
- output->GetOpMemDesc(), filter->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(), strides,
- dilations, padding_l, padding_r, padding):
- convolution_backward_data::desc(convolution_direct,
- output->GetOpMemDesc(), filter->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(),
- strides, padding_l, padding_r, padding);
-
- auto bwd_pd = convolution_backward_data::primitive_desc(
- bwd_desc, cpu_engine, conv_fwd_pd);
-
- // Allocate output tensor in TensorFlow and MKL layout.
- AllocateOutputTensor(context, bwd_pd, bwd_output_dims, bwd_output_format,
- output_tensor);
- CHECK_NOTNULL(*output_tensor);
- // Set buffer handle using allocated output tensor.
- output->SetUsrMemDataHandle(*output_tensor);
-
- PrepareAndExecutePrimitive(bwd_pd, filter, outbackprop, output);
- }
-
// Allocate output tensor.
void AllocateOutputTensor(
OpKernelContext* context,
@@ -487,22 +838,6 @@ class MklConv2DCustomBackpropInputOp
AllocateOutputSetMklShape(context, 0, output_tensor, output_tf_shape,
output_mkl_shape);
}
-
- // Prepare and execute net - checks for input and output reorders.
- void PrepareAndExecutePrimitive(
- const convolution_backward_data::primitive_desc& conv_pd,
- MklDnnData<T>* filter, MklDnnData<T>* obp, MklDnnData<T>* output) {
- // Create reorders between user layout and MKL layout if it is needed and
- // add it to the net before convolution.
- std::vector<primitive> net;
- filter->CheckReorderToOpMem(conv_pd.weights_primitive_desc(), &net);
- obp->CheckReorderToOpMem(conv_pd.diff_dst_primitive_desc(), &net);
-
- net.push_back(convolution_backward_data(
- conv_pd, obp->GetOpMem(), filter->GetOpMem(), output->GetOpMem()));
-
- stream(stream::kind::eager).submit(net).wait();
- }
};
#endif // INTEL_MKL_ML