diff options
author | Shanqing Cai <cais@google.com> | 2017-09-25 19:35:53 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2017-09-25 19:39:42 -0700 |
commit | e2e3a943c0a28b7656325acb3fcd035743d55ea0 (patch) | |
tree | f4b909d5410bdf3b94012392909e7805cd27a2a7 /tensorflow/core/kernels/mkl_aggregate_ops.cc | |
parent | df22044be98c8b707601e03fe22ded53bcc28c7e (diff) |
Merge changes from github.
END_PUBLIC
---
Commit 1e1b3d902 authored by Pete Warden<pete@petewarden.com>
Committed by gunan<gunan@google.com>:
Changed output directory for Pi CI build to fix permissions problem with nightlies (#13257)
* Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change for Pi that's no longer needed
* Fixed Pi Zero OpenBLAS build problems and tidied up directories used
* More robust checks in Pi build script
* Changed output directory for Pi CI build to fix permissions problem
---
Commit fe3a2e65c authored by Yan Facai (颜发才)<facai.yan@gmail.com>
Committed by drpngx<drpngx@users.noreply.github.com>:
check invalid string type for dest_nodes in extract_sub_graph (#13057)
* BUG: check str type
* TST: add unit test
* CLN: remove list check
* CLN: use warning
* CLN: 2 indent
* CLN: raise TypeError if not list
* CLN: check string only
---
Commit 225ab7629 authored by Jean Wanka<jm.wanka@gmail.com>
Committed by Jean Wanka<jm.wanka@gmail.com>:
Fix polynomial decay with cycle for global step=0
For polynomial decay with cycle=True the learning rate at
step 0 becomes NaN, because in the process of calculating it we
divide by 0. This change should fix it, by setting the multiplier
for the decay steps to one for global_step=0.
---
Commit 286f57061 authored by Bjarke Hammersholt Roune<broune@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Make Service::TransferToClient not attempt to manipulate the literal when the transfer failed, preventing a crash and allowing the caller to see the reason for the failed transfer.
PiperOrigin-RevId: 169770126
---
Commit e0501bc4d authored by Yong Tang<yong.tang.github@outlook.com>
Committed by Shanqing Cai<cais@google.com>:
Fix GRUBlockCell parameter naming inconsistency (#13153)
* Fix GRUBlockCell parameter naming inconsistency
This fix tries to fix the issue in 13137 where
parameter `cell_size` is used instead of `num_units`.
This is inconsistent with other RNN cells.
This fix adds support of `num_units` while at the same
time maintains backward compatibility for `cell_size`.
This fix fixes 13137.
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
* Add `@deprecated_args` for 'cell_size' in `GRUBlockCell`
This commit adds `@deprecated_args` for 'cell_size' in `GRUBlockCell`
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
* Address review comment
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
---
Commit 02a2eba05 authored by Pete Warden<pete@petewarden.com>
Committed by gunan<gunan@google.com>:
Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change that's no longer needed (#13251)
* Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change for Pi that's no longer needed
* Fixed Pi Zero OpenBLAS build problems and tidied up directories used
* More robust checks in Pi build script
---
Commit 8ef722253 authored by Sanjoy Das<sanjoy@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Remove a redundant setName.
The EmitComputation should have emitted a function with the right name, so use a
CHECK instead.
PiperOrigin-RevId: 169764856
---
Commit 1b94147dc authored by Neal Wu<wun@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Fix broken GitHub links in tensorflow and tensorflow_models resulting from The Great Models Move (a.k.a. the research subfolder)
PiperOrigin-RevId: 169763373
---
Commit b1ada5f0c authored by Justine Tunney<jart@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Fix TensorBoard python -m invoke in docs
PiperOrigin-RevId: 169758752
---
Commit 2957cd894 authored by Mustafa Ispir<ispir@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Local run option of estimator training.
PiperOrigin-RevId: 169756384
---
Commit 1dc2fe7ac authored by Gunhan Gulsoy<gunan@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
BEGIN_PUBLIC
Automated g4 rollback of changelist 166264198
PiperOrigin-RevId: 169998124
Diffstat (limited to 'tensorflow/core/kernels/mkl_aggregate_ops.cc')
-rw-r--r-- | tensorflow/core/kernels/mkl_aggregate_ops.cc | 110 |
1 files changed, 60 insertions, 50 deletions
diff --git a/tensorflow/core/kernels/mkl_aggregate_ops.cc b/tensorflow/core/kernels/mkl_aggregate_ops.cc index 51ba127def..935eb81dd0 100644 --- a/tensorflow/core/kernels/mkl_aggregate_ops.cc +++ b/tensorflow/core/kernels/mkl_aggregate_ops.cc @@ -54,17 +54,62 @@ class MklAddNOp : public OpKernel { GetMklShape(ctx, 1, &(mkl_context.input2_shape)); bool input2_in_mkl_format = mkl_context.input2_shape.IsMklTensor(); + // handle the case of a scalar + if (!input1_in_mkl_format && input0.dims() == 0) { + const TensorShape& o_shape = input0.shape(); + Tensor* out_tensor = nullptr; + mkl_context.output_shape.SetMklTensor(false); + AllocateOutputSetMklShape(ctx, 0, &out_tensor, o_shape, + mkl_context.output_shape); + float user_i1 = (input0.scalar<T>()()); + ; + float user_i2 = (input1.scalar<T>()()); + ; + out_tensor->scalar<T>()() = std::plus<float>{}(user_i1, user_i2); + return; + } + mkl_context.in_dims = input1_in_mkl_format ? mkl_context.input1_shape.GetDimension() : input0.dims(); mkl_context.in_dims = input2_in_mkl_format ? mkl_context.input2_shape.GetDimension() : input1.dims(); + + // If there is nothing to compute, return. + if (!input1_in_mkl_format && !input2_in_mkl_format) { + const TensorShape& o_shape = input0.shape(); + if (o_shape.num_elements() == 0) { + Tensor* out_tensor = nullptr; + mkl_context.output_shape.SetMklTensor(false); + AllocateOutputSetMklShape(ctx, 0, &out_tensor, o_shape, + mkl_context.output_shape); + return; + } + } + + mkl_context.in_sizes = new size_t[mkl_context.in_dims]; + mkl_context.in_strides = new size_t[mkl_context.in_dims]; // Generate size, stride for input if input is in MKL format. - ExtractMklOpParams(&mkl_context.in1_sizes, - &mkl_context.in1_strides, input0, &mkl_context.input1_shape); - ExtractMklOpParams(&mkl_context.in2_sizes, - &mkl_context.in2_strides, input1, &mkl_context.input2_shape); + if (input1_in_mkl_format || input2_in_mkl_format) { + const MklShape* tmp_mkl_shape = (input1_in_mkl_format) + ? 
&mkl_context.input1_shape + : &mkl_context.input2_shape; + for (int i = 0; i < mkl_context.in_dims; i++) { + mkl_context.in_sizes[i] = tmp_mkl_shape->GetSizes()[i]; + mkl_context.in_strides[i] = tmp_mkl_shape->GetStrides()[i]; + } + } else { + for (int i = 0; i < mkl_context.in_dims; i++) { + mkl_context.in_sizes[i] = + input0.dim_size((mkl_context.in_dims - 1) - i); + } + mkl_context.in_strides[0] = 1; + for (int i = 1; i < mkl_context.in_dims; i++) { + mkl_context.in_strides[i] = + mkl_context.in_strides[i - 1] * mkl_context.in_sizes[i - 1]; + } + } std::vector<float> coeff(2, 1.0); mkl_context.MklCreateInputLayouts(ctx); @@ -82,7 +127,7 @@ class MklAddNOp : public OpKernel { mkl_context.output_shape.SetMklLayout(mkl_context.Eltwise, dnnResourceDst); mkl_context.output_shape.SetTfLayout( - mkl_context.in_dims, mkl_context.in1_sizes, mkl_context.in1_strides); + mkl_context.in_dims, mkl_context.in_sizes, mkl_context.in_strides); if (input1_in_mkl_format == true) { mkl_context.output_shape.SetTfDimOrder(mkl_context.in_dims, mkl_context.input1_shape.GetTfToMklDimMap()); @@ -113,44 +158,11 @@ class MklAddNOp : public OpKernel { mkl_context.MklCleanup(); } - void ExtractMklOpParams(size_t** out_sizes, size_t** out_strides, - const Tensor& input, const MklShape* input_shape) { - bool input_in_mkl_format = input_shape->IsMklTensor(); - int in_dims = input_in_mkl_format - ? 
input_shape->GetDimension() - : input.dims(); - size_t* in_sizes = new size_t[in_dims]; - size_t* in_strides = new size_t[in_dims]; - - if (input_in_mkl_format) { - for (int i = 0; i < in_dims; i++) { - in_sizes[i] = input_shape->GetSizes()[i]; - in_strides[i] = input_shape->GetStrides()[i]; - } - } else { - for (int i = 0; i < in_dims; i++) { - in_sizes[i] = - input.dim_size((in_dims - 1) - i); - } - in_strides[0] = 1; - for (int i = 1; i < in_dims; i++) { - in_strides[i] = - in_strides[i - 1] * in_sizes[i - 1]; - } - } - *out_sizes = in_sizes; - *out_strides = in_strides; - } - - private: typedef struct { int in_dims; - size_t* in1_sizes; - size_t* in1_strides; - - size_t* in2_sizes; - size_t* in2_strides; + size_t* in_sizes = nullptr; + size_t* in_strides = nullptr; dnnPrimitive_t Eltwise = nullptr; dnnPrimitiveAttributes_t attributes = nullptr; void* Eltwise_res[dnnResourceNumber]; @@ -160,18 +172,16 @@ class MklAddNOp : public OpKernel { void MklCreateInputLayouts(OpKernelContext* context) { bool input1_in_mkl_format = input1_shape.IsMklTensor(); if (!input1_in_mkl_format) { - CHECK_EQ( - dnnLayoutCreate_F32(<_input1, in_dims, in1_sizes, in1_strides), - E_SUCCESS); + CHECK_EQ(dnnLayoutCreate_F32(<_input1, in_dims, in_sizes, in_strides), + E_SUCCESS); } else { lt_input1 = static_cast<dnnLayout_t>(input1_shape.GetCurLayout()); } bool input2_in_mkl_format = input2_shape.IsMklTensor(); if (!input2_in_mkl_format) { - CHECK_EQ( - dnnLayoutCreate_F32(<_input2, in_dims, in2_sizes, in2_strides), - E_SUCCESS); + CHECK_EQ(dnnLayoutCreate_F32(<_input2, in_dims, in_sizes, in_strides), + E_SUCCESS); } else { lt_input2 = static_cast<dnnLayout_t>(input2_shape.GetCurLayout()); } @@ -246,15 +256,15 @@ class MklAddNOp : public OpKernel { bool input1_in_mkl_format = input1_shape.IsMklTensor(); bool input2_in_mkl_format = input2_shape.IsMklTensor(); dnnDelete_F32(Eltwise); + if (!input1_in_mkl_format || !input2_in_mkl_format) { + delete[] in_sizes; + delete[] in_strides; + } if 
(!input1_in_mkl_format) { dnnLayoutDelete_F32(lt_input1); - delete [] in1_sizes; - delete [] in1_strides; } if (!input2_in_mkl_format) { dnnLayoutDelete_F32(lt_input2); - delete [] in2_sizes; - delete [] in2_strides; } } } MklAddNOpContext; |