about summary refs log tree commit diff homepage
path: root/tensorflow/contrib/rnn
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2018-01-30 10:43:03 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-01-30 12:33:54 -0800
commit4463d105a8a4a83642b9709ba79310e8f4ddf577 (patch)
tree240e9a0a9a6b9ad956c704776a33126ba00cbfe8 /tensorflow/contrib/rnn
parent8f0e7207774279f4fe50f4d6c4fbd576e2941463 (diff)
Cleanup: Ran clang-format on all *.{cc,h} files in tensorflow/contrib/.../*.{hh,c}.
PiperOrigin-RevId: 183855242
Diffstat (limited to 'tensorflow/contrib/rnn')
-rw-r--r--tensorflow/contrib/rnn/kernels/blas_gemm.cc9
-rw-r--r--tensorflow/contrib/rnn/kernels/gru_ops.cc110
-rw-r--r--tensorflow/contrib/rnn/kernels/lstm_ops.cc193
-rw-r--r--tensorflow/contrib/rnn/kernels/lstm_ops.h1
-rw-r--r--tensorflow/contrib/rnn/ops/lstm_ops_test.cc5
5 files changed, 158 insertions, 160 deletions
diff --git a/tensorflow/contrib/rnn/kernels/blas_gemm.cc b/tensorflow/contrib/rnn/kernels/blas_gemm.cc
index e62501e9b1..03006dab32 100644
--- a/tensorflow/contrib/rnn/kernels/blas_gemm.cc
+++ b/tensorflow/contrib/rnn/kernels/blas_gemm.cc
@@ -36,11 +36,10 @@ perftools::gputools::DeviceMemory<T> AsDeviceMemory(const T* cuda_memory) {
namespace functor {
template <typename T>
-void TensorCuBlasGemm<T>::operator()(OpKernelContext* ctx,
- bool transa, bool transb, uint64 m,
- uint64 n, uint64 k, T alpha, const T* a,
- int lda, const T* b, int ldb, T beta, T* c,
- int ldc) {
+void TensorCuBlasGemm<T>::operator()(OpKernelContext* ctx, bool transa,
+ bool transb, uint64 m, uint64 n, uint64 k,
+ T alpha, const T* a, int lda, const T* b,
+ int ldb, T beta, T* c, int ldc) {
#if GOOGLE_CUDA
perftools::gputools::blas::Transpose trans[] = {
perftools::gputools::blas::Transpose::kNoTranspose,
diff --git a/tensorflow/contrib/rnn/kernels/gru_ops.cc b/tensorflow/contrib/rnn/kernels/gru_ops.cc
index 0796f82b21..bd3d898fb0 100644
--- a/tensorflow/contrib/rnn/kernels/gru_ops.cc
+++ b/tensorflow/contrib/rnn/kernels/gru_ops.cc
@@ -15,8 +15,8 @@ limitations under the License.
#define EIGEN_USE_THREADS
-#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/contrib/rnn/kernels/gru_ops.h"
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
@@ -61,9 +61,9 @@ class GRUCellBlockOp : public OpKernel {
h_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("h_prev.dims(1) != cell_size: ",
- h_prev_tensor->dim_size(1), " vs. ",
- cell_size));
+ errors::InvalidArgument(
+ "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),
+ " vs. ", cell_size));
// Shape of 'w_ru' must be [input_size+cell_size, 2*cell_size]
OP_REQUIRES(ctx, w_ru_tensor->dim_size(0) == input_size + cell_size,
@@ -82,10 +82,10 @@ class GRUCellBlockOp : public OpKernel {
"w_c.dim_size(0) != input_size + cell_size: ",
w_c_tensor->dim_size(0), " vs. ", input_size + cell_size));
- OP_REQUIRES(
- ctx, w_c_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("w_c.dim_size(1) != cell_size: ",
- w_c_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, w_c_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "w_c.dim_size(1) != cell_size: ", w_c_tensor->dim_size(1),
+ " vs. ", cell_size));
// Shape of 'b_ru' must be [2*cell_size]
OP_REQUIRES(ctx, b_ru_tensor->dim_size(0) == cell_size * 2,
@@ -97,10 +97,10 @@ class GRUCellBlockOp : public OpKernel {
errors::InvalidArgument("Rank of b_ru must be 1",
b_ru_tensor->dims(), " vs. 1", 1));
// Shape of 'b_c' must be [cell_size]
- OP_REQUIRES(
- ctx, b_c_tensor->dim_size(0) == cell_size,
- errors::InvalidArgument("b_c.dim_size(0) != cell_size: ",
- b_c_tensor->dim_size(0), " vs. ", cell_size));
+ OP_REQUIRES(ctx, b_c_tensor->dim_size(0) == cell_size,
+ errors::InvalidArgument(
+ "b_c.dim_size(0) != cell_size: ", b_c_tensor->dim_size(0),
+ " vs. ", cell_size));
OP_REQUIRES(ctx, b_c_tensor->dims() == 1,
errors::InvalidArgument("Rank of b_c must be 1",
b_c_tensor->dims(), " vs. 1"));
@@ -216,9 +216,9 @@ class GRUBlockCellGradOp : public OpKernel {
h_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("h_prev.dims(1) != cell_size: ",
- h_prev_tensor->dim_size(1), " vs. ",
- cell_size));
+ errors::InvalidArgument(
+ "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),
+ " vs. ", cell_size));
// Shape of 'w_ru' must be [input_size+cell_size, 2*cell_size]
OP_REQUIRES(ctx, w_ru_tensor->dim_size(0) == input_size + cell_size,
@@ -237,10 +237,10 @@ class GRUBlockCellGradOp : public OpKernel {
"w_c.dim_size(0) != input_size + cell_size: ",
w_c_tensor->dim_size(0), " vs. ", input_size + cell_size));
- OP_REQUIRES(
- ctx, w_c_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("w_c.dim_size(1) != cell_size: ",
- w_c_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, w_c_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "w_c.dim_size(1) != cell_size: ", w_c_tensor->dim_size(1),
+ " vs. ", cell_size));
// Shape of 'b_ru' must be [2*cell_size]
OP_REQUIRES(ctx, b_ru_tensor->dim_size(0) == cell_size * 2,
@@ -253,54 +253,54 @@ class GRUBlockCellGradOp : public OpKernel {
b_ru_tensor->dims(), " vs. 1"));
// Shape of 'b_c' must be [cell_size]
- OP_REQUIRES(
- ctx, b_c_tensor->dim_size(0) == cell_size,
- errors::InvalidArgument("b_c.dim_size(0) != cell_size: ",
- b_c_tensor->dim_size(0), " vs. ", cell_size));
+ OP_REQUIRES(ctx, b_c_tensor->dim_size(0) == cell_size,
+ errors::InvalidArgument(
+ "b_c.dim_size(0) != cell_size: ", b_c_tensor->dim_size(0),
+ " vs. ", cell_size));
OP_REQUIRES(ctx, b_c_tensor->dims() == 1,
errors::InvalidArgument("Rank of b_c must be 1 ",
b_c_tensor->dims(), " vs. 1"));
// Shape of 'r' must be [batch_size, cell_size]
- OP_REQUIRES(
- ctx, r_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("r.dims(0) != batch_size: ",
- r_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, r_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("r.dims(1) != cell_size: ",
- r_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, r_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "r.dims(0) != batch_size: ", r_tensor->dim_size(0), " vs. ",
+ batch_size));
+ OP_REQUIRES(ctx, r_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "r.dims(1) != cell_size: ", r_tensor->dim_size(1), " vs. ",
+ cell_size));
// Shape of 'u' must be [batch_size, cell_size]
- OP_REQUIRES(
- ctx, u_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("u.dims(0) != batch_size: ",
- u_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, u_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("u.dims(1) != cell_size: ",
- u_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, u_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "u.dims(0) != batch_size: ", u_tensor->dim_size(0), " vs. ",
+ batch_size));
+ OP_REQUIRES(ctx, u_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "u.dims(1) != cell_size: ", u_tensor->dim_size(1), " vs. ",
+ cell_size));
// Shape of 'c' must be [batch_size, cell_size]
- OP_REQUIRES(
- ctx, c_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("c.dims(0) != batch_size: ",
- c_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, c_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("c.dims(1) != cell_size: ",
- c_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, c_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "c.dims(0) != batch_size: ", c_tensor->dim_size(0), " vs. ",
+ batch_size));
+ OP_REQUIRES(ctx, c_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "c.dims(1) != cell_size: ", c_tensor->dim_size(1), " vs. ",
+ cell_size));
// Shape of 'd_h' must be [batch_size, cell_size]
- OP_REQUIRES(
- ctx, d_h_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("d_h.dims(0) != batch_size: ",
- d_h_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, d_h_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("d_h.dims(1) != cell_size: ",
- d_h_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, d_h_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "d_h.dims(0) != batch_size: ", d_h_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, d_h_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "d_h.dims(1) != cell_size: ", d_h_tensor->dim_size(1),
+ " vs. ", cell_size));
// Create output tensors.
Tensor* d_x_tensor = nullptr;
diff --git a/tensorflow/contrib/rnn/kernels/lstm_ops.cc b/tensorflow/contrib/rnn/kernels/lstm_ops.cc
index 941a457fd3..5e7cf0ce84 100644
--- a/tensorflow/contrib/rnn/kernels/lstm_ops.cc
+++ b/tensorflow/contrib/rnn/kernels/lstm_ops.cc
@@ -281,23 +281,23 @@ class LSTMBlockCellOp : public OpKernel {
h_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("h_prev.dims(1) != cell_size: ",
- h_prev_tensor->dim_size(1), " vs. ",
- cell_size));
+ errors::InvalidArgument(
+ "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),
+ " vs. ", cell_size));
OP_REQUIRES(ctx, w_tensor->dim_size(0) == input_size + cell_size,
errors::InvalidArgument(
"w.dim_size(0) != input_size + cell_size: ",
w_tensor->dim_size(0), " vs. ", input_size + cell_size));
- OP_REQUIRES(
- ctx, w_tensor->dim_size(1) == cell_size * 4,
- errors::InvalidArgument("w.dim_size(1) != cell_size * 4: ",
- w_tensor->dim_size(1), " vs. ", cell_size * 4));
+ OP_REQUIRES(ctx, w_tensor->dim_size(1) == cell_size * 4,
+ errors::InvalidArgument(
+ "w.dim_size(1) != cell_size * 4: ", w_tensor->dim_size(1),
+ " vs. ", cell_size * 4));
- OP_REQUIRES(
- ctx, b_tensor->dim_size(0) == cell_size * 4,
- errors::InvalidArgument("b.dim_size(0) != cell_size * 4: ",
- b_tensor->dim_size(0), " vs. ", cell_size * 4));
+ OP_REQUIRES(ctx, b_tensor->dim_size(0) == cell_size * 4,
+ errors::InvalidArgument(
+ "b.dim_size(0) != cell_size * 4: ", b_tensor->dim_size(0),
+ " vs. ", cell_size * 4));
// Allocate our output tensors.
Tensor* i_tensor = nullptr;
@@ -484,77 +484,77 @@ class LSTMBlockCellGradOp : public OpKernel {
h_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("h_prev.dims(1) != cell_size: ",
- h_prev_tensor->dim_size(1), " vs. ",
- cell_size));
+ errors::InvalidArgument(
+ "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),
+ " vs. ", cell_size));
OP_REQUIRES(ctx, w_tensor->dim_size(0) == input_size + cell_size,
errors::InvalidArgument(
"w.dim_size(0) != input_size + cell_size: ",
w_tensor->dim_size(0), " vs. ", input_size + cell_size));
- OP_REQUIRES(
- ctx, w_tensor->dim_size(1) == cell_size * 4,
- errors::InvalidArgument("w.dim_size(1) != cell_size * 4: ",
- w_tensor->dim_size(1), " vs. ", cell_size * 4));
+ OP_REQUIRES(ctx, w_tensor->dim_size(1) == cell_size * 4,
+ errors::InvalidArgument(
+ "w.dim_size(1) != cell_size * 4: ", w_tensor->dim_size(1),
+ " vs. ", cell_size * 4));
- OP_REQUIRES(
- ctx, b_tensor->dim_size(0) == cell_size * 4,
- errors::InvalidArgument("b.dim_size(0) != cell_size * 4: ",
- b_tensor->dim_size(0), " vs. ", cell_size * 4));
+ OP_REQUIRES(ctx, b_tensor->dim_size(0) == cell_size * 4,
+ errors::InvalidArgument(
+ "b.dim_size(0) != cell_size * 4: ", b_tensor->dim_size(0),
+ " vs. ", cell_size * 4));
- OP_REQUIRES(
- ctx, i_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("i.dim_size(0) != batch_size: ",
- i_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, i_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("i.dim_size(1) != cell_size: ",
- i_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, i_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "i.dim_size(0) != batch_size: ", i_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, i_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "i.dim_size(1) != cell_size: ", i_tensor->dim_size(1),
+ " vs. ", cell_size));
- OP_REQUIRES(
- ctx, cs_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("cs.dim_size(0) != batch_size: ",
- cs_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, cs_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("cs.dim_size(1) != cell_size: ",
- cs_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, cs_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "cs.dim_size(0) != batch_size: ", cs_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, cs_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "cs.dim_size(1) != cell_size: ", cs_tensor->dim_size(1),
+ " vs. ", cell_size));
- OP_REQUIRES(
- ctx, f_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("f.dim_size(0) != batch_size: ",
- f_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, f_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("i.dim_size(1) != cell_size: ",
- f_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, f_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "f.dim_size(0) != batch_size: ", f_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, f_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "i.dim_size(1) != cell_size: ", f_tensor->dim_size(1),
+ " vs. ", cell_size));
- OP_REQUIRES(
- ctx, o_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("o.dim_size(0) != batch_size: ",
- o_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, o_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("o.dim_size(1) != cell_size: ",
- o_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, o_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "o.dim_size(0) != batch_size: ", o_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, o_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "o.dim_size(1) != cell_size: ", o_tensor->dim_size(1),
+ " vs. ", cell_size));
- OP_REQUIRES(
- ctx, ci_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("ci.dim_size(0) != batch_size: ",
- ci_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, ci_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("ci.dim_size(1) != cell_size: ",
- ci_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, ci_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "ci.dim_size(0) != batch_size: ", ci_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, ci_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "ci.dim_size(1) != cell_size: ", ci_tensor->dim_size(1),
+ " vs. ", cell_size));
- OP_REQUIRES(
- ctx, co_tensor->dim_size(0) == batch_size,
- errors::InvalidArgument("co.dim_size(0) != batch_size: ",
- co_tensor->dim_size(0), " vs. ", batch_size));
- OP_REQUIRES(
- ctx, co_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("co.dim_size(1) != cell_size: ",
- co_tensor->dim_size(1), " vs. ", cell_size));
+ OP_REQUIRES(ctx, co_tensor->dim_size(0) == batch_size,
+ errors::InvalidArgument(
+ "co.dim_size(0) != batch_size: ", co_tensor->dim_size(0),
+ " vs. ", batch_size));
+ OP_REQUIRES(ctx, co_tensor->dim_size(1) == cell_size,
+ errors::InvalidArgument(
+ "co.dim_size(1) != cell_size: ", co_tensor->dim_size(1),
+ " vs. ", cell_size));
OP_REQUIRES(ctx, cs_grad_tensor->dim_size(0) == batch_size,
errors::InvalidArgument(
@@ -860,9 +860,9 @@ class BlockLSTMOp : public OpKernel {
h_prev_tensor->dim_size(0), " vs. ",
batch_size));
OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,
- errors::InvalidArgument("h_prev.dims(1) != cell_size: ",
- h_prev_tensor->dim_size(1), " vs. ",
- cell_size));
+ errors::InvalidArgument(
+ "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),
+ " vs. ", cell_size));
const Tensor* w_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("w", &w_tensor));
@@ -872,46 +872,46 @@ class BlockLSTMOp : public OpKernel {
errors::InvalidArgument(
"w.dim_size(0) != input_size + cell_size: ",
w_tensor->dim_size(0), " vs. ", input_size + cell_size));
- OP_REQUIRES(
- ctx, w_tensor->dim_size(1) == cell_size * 4,
- errors::InvalidArgument("w.dim_size(1) != cell_size * 4: ",
- w_tensor->dim_size(1), " vs. ", cell_size * 4));
+ OP_REQUIRES(ctx, w_tensor->dim_size(1) == cell_size * 4,
+ errors::InvalidArgument(
+ "w.dim_size(1) != cell_size * 4: ", w_tensor->dim_size(1),
+ " vs. ", cell_size * 4));
const Tensor* wci_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wci", &wci_tensor));
OP_REQUIRES(ctx, wci_tensor->dims() == 1,
errors::InvalidArgument("wci must be 1D"));
- OP_REQUIRES(
- ctx, wci_tensor->dim_size(0) == cell_size,
- errors::InvalidArgument("wci.dim_size(0) != cell_size: ",
- wci_tensor->dim_size(0), " vs. ", cell_size));
+ OP_REQUIRES(ctx, wci_tensor->dim_size(0) == cell_size,
+ errors::InvalidArgument(
+ "wci.dim_size(0) != cell_size: ", wci_tensor->dim_size(0),
+ " vs. ", cell_size));
const Tensor* wcf_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wcf", &wcf_tensor));
OP_REQUIRES(ctx, wcf_tensor->dims() == 1,
errors::InvalidArgument("wcf must be 1D"));
- OP_REQUIRES(
- ctx, wcf_tensor->dim_size(0) == cell_size,
- errors::InvalidArgument("wcf.dim_size(0) != cell_size: ",
- wcf_tensor->dim_size(0), " vs. ", cell_size));
+ OP_REQUIRES(ctx, wcf_tensor->dim_size(0) == cell_size,
+ errors::InvalidArgument(
+ "wcf.dim_size(0) != cell_size: ", wcf_tensor->dim_size(0),
+ " vs. ", cell_size));
const Tensor* wco_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wco", &wco_tensor));
OP_REQUIRES(ctx, wco_tensor->dims() == 1,
errors::InvalidArgument("wco must be 1D"));
- OP_REQUIRES(
- ctx, wco_tensor->dim_size(0) == cell_size,
- errors::InvalidArgument("wco.dim_size(0) != cell_size: ",
- wco_tensor->dim_size(0), " vs. ", cell_size));
+ OP_REQUIRES(ctx, wco_tensor->dim_size(0) == cell_size,
+ errors::InvalidArgument(
+ "wco.dim_size(0) != cell_size: ", wco_tensor->dim_size(0),
+ " vs. ", cell_size));
const Tensor* b_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("b", &b_tensor));
OP_REQUIRES(ctx, b_tensor->dims() == 1,
errors::InvalidArgument("b must be 1D"));
- OP_REQUIRES(
- ctx, b_tensor->dim_size(0) == cell_size * 4,
- errors::InvalidArgument("b.dim_size(0) != cell_size * 4: ",
- b_tensor->dim_size(0), " vs. ", cell_size * 4));
+ OP_REQUIRES(ctx, b_tensor->dim_size(0) == cell_size * 4,
+ errors::InvalidArgument(
+ "b.dim_size(0) != cell_size * 4: ", b_tensor->dim_size(0),
+ " vs. ", cell_size * 4));
TensorShape batch_cell_shape({timelen, batch_size, cell_size});
Tensor* i_out;
@@ -1065,9 +1065,9 @@ class BlockLSTMGradOp : public OpKernel {
OP_REQUIRES_OK(ctx, ctx->input("w", &w_tensor));
const int64 cell_size = w_tensor->dim_size(1) / 4;
OP_REQUIRES(ctx, input_size + cell_size == w_tensor->dim_size(0),
- errors::InvalidArgument("w matrix rows don't match: ",
- input_size + cell_size, " vs. ",
- w_tensor->dim_size(0)));
+ errors::InvalidArgument(
+ "w matrix rows don't match: ", input_size + cell_size,
+ " vs. ", w_tensor->dim_size(0)));
const Tensor* wci_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->input("wci", &wci_tensor));
@@ -1193,7 +1193,6 @@ class BlockLSTMGradOp : public OpKernel {
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::v(),
batch_cell_shape, &h_grad_tensor));
-
const Device& device = ctx->eigen_device<Device>();
functor::TensorZero<Device, T>()(device, cs_grad_tensor.flat<float>());
diff --git a/tensorflow/contrib/rnn/kernels/lstm_ops.h b/tensorflow/contrib/rnn/kernels/lstm_ops.h
index bc6b85f3f1..d23cedc234 100644
--- a/tensorflow/contrib/rnn/kernels/lstm_ops.h
+++ b/tensorflow/contrib/rnn/kernels/lstm_ops.h
@@ -92,7 +92,6 @@ struct TensorZeroPadding {
}
};
-
struct LSTMBlockCell {
LSTMBlockCell(const int batch_size, const int input_size, const int cell_size)
: batch_size_(batch_size),
diff --git a/tensorflow/contrib/rnn/ops/lstm_ops_test.cc b/tensorflow/contrib/rnn/ops/lstm_ops_test.cc
index 544cd163c5..68184b643e 100644
--- a/tensorflow/contrib/rnn/ops/lstm_ops_test.cc
+++ b/tensorflow/contrib/rnn/ops/lstm_ops_test.cc
@@ -149,8 +149,9 @@ TEST_F(LSTMOpsTest, BlockLSTMGrad_ShapeFn) {
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;?;?;?;[1,?]" + suffix);
// Output with all input knowns makes known rank outputs.
- INFER_OK(op, JoinedCopies("?", 18), "[?,?,?];" + JoinedCopies("[?,?]", 3) +
- ";" + JoinedCopies("[?]", 4));
+ INFER_OK(
+ op, JoinedCopies("?", 18),
+ "[?,?,?];" + JoinedCopies("[?,?]", 3) + ";" + JoinedCopies("[?]", 4));
// Output with copies input shapes to output.
string input = strings::StrCat("?;[?,?,?];", JoinedCopies("[?,?]", 3), ";",