diff options
author | Ayush Dubey <ayushd@google.com> | 2018-09-28 14:36:16 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-09-28 14:44:49 -0700 |
commit | 5863cad53afad2fcc5d8a8dac7c2cf88e0e8ebb9 (patch) | |
tree | bc02784420c13a56826d562cb11c42cbd3ea3054 /tensorflow/core/kernels | |
parent | 17d73444f332490c733d37063710e72dc69d1141 (diff) |
Copy shape into CollectiveParams only once per CollectiveReduce kernel.
PiperOrigin-RevId: 214997213
Diffstat (limited to 'tensorflow/core/kernels')
-rw-r--r-- | tensorflow/core/kernels/collective_ops.cc | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/tensorflow/core/kernels/collective_ops.cc b/tensorflow/core/kernels/collective_ops.cc
index fa959b5a0e..82e2913b64 100644
--- a/tensorflow/core/kernels/collective_ops.cc
+++ b/tensorflow/core/kernels/collective_ops.cc
@@ -132,7 +132,6 @@ class CollectiveReduceOpKernel : public CollectiveOpKernel {
             "Failed to get CollectiveExecutor from OpKernelContext for Op ",
             col_params_.name),
         done);
-    col_params_.instance.shape = c->input(0).shape();
     // Allocate output on the first pass through this function. This must be
     // done immediately, while we're still in the executor thread. Otherwise
     // the memory is not guaranteed to be unused by any concurrently executing
@@ -144,6 +143,7 @@ class CollectiveReduceOpKernel : public CollectiveOpKernel {
           c->forward_input_or_allocate_output(
               {0}, 0, c->input(0).shape(), &output),
           done);
+      col_params_.instance.shape = c->input(0).shape();
     }
     if (!CanProceedWithCompute(c, col_exec, done)) return;
     auto actual_done = [c, col_exec, done](const Status& s) {