author    Jacques Pienaar <jpienaar@google.com>  2018-03-21 12:07:51 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-03-21 12:10:30 -0700
commit    2d0531d72c7dcbb0e149cafdd3a16ee8c3ff357a (patch)
tree      1179ecdd684d10c6549f85aa95f33dd79463a093 /tensorflow/contrib/rnn
parent    cbede3ea7574b36f429710bc08617d08455bcc21 (diff)
Merge changes from github.
PiperOrigin-RevId: 189945839
Diffstat (limited to 'tensorflow/contrib/rnn')
 tensorflow/contrib/rnn/ops/gru_ops.cc                       |  2 +-
 tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py |  2 +-
 tensorflow/contrib/rnn/python/ops/lstm_ops.py               |  3 +--
 tensorflow/contrib/rnn/python/ops/rnn_cell.py               | 10 +++++-----
 4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/tensorflow/contrib/rnn/ops/gru_ops.cc b/tensorflow/contrib/rnn/ops/gru_ops.cc
index e91d1e8a80..9c8e40851a 100644
--- a/tensorflow/contrib/rnn/ops/gru_ops.cc
+++ b/tensorflow/contrib/rnn/ops/gru_ops.cc
@@ -69,7 +69,7 @@ Element-wise dot product of a and b is represented by ab
Element-wise dot product is represented by \circ
Matrix multiplication is represented by *
-Baises are initialized with :
+Biases are initialized with :
`b_ru` - constant_initializer(1.0)
`b_c` - constant_initializer(0.0)
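
For context, a minimal sketch (not the kernel implementation) of how the bias initialization documented above could be reproduced with the TF 1.x variable API; the scope name and cell size are hypothetical:

    import tensorflow as tf

    num_units = 128  # hypothetical cell size

    with tf.variable_scope("gru_cell"):  # hypothetical scope name
        # Reset/update gate biases start at 1.0, per the doc above.
        b_ru = tf.get_variable(
            "b_ru", shape=[2 * num_units],
            initializer=tf.constant_initializer(1.0))
        # Candidate bias starts at 0.0.
        b_c = tf.get_variable(
            "b_c", shape=[num_units],
            initializer=tf.constant_initializer(0.0))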
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py b/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
index 7957edf68c..ffd2421894 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
@@ -54,7 +54,7 @@ def blocks_match(sess, use_peephole):
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("test", initializer=initializer):
- # magic naming so that the cells pick up these variables and resuse them
+ # magic naming so that the cells pick up these variables and reuse them
if use_peephole:
wci = variable_scope.get_variable(
"rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtypes.float32)
diff --git a/tensorflow/contrib/rnn/python/ops/lstm_ops.py b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
index 4eb4fbcd92..9e61fc54d1 100644
--- a/tensorflow/contrib/rnn/python/ops/lstm_ops.py
+++ b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
@@ -480,8 +480,7 @@ class LSTMBlockWrapper(base_layer.Layer):
"""Run this LSTM on inputs, starting from the given state.
Args:
- inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
- or a list of `time_len` tensors of shape `[batch_size, input_size]`.
+ inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
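
A minimal usage sketch matching the tightened docstring, assuming the public tf.contrib.rnn.LSTMBlockFusedCell alias (a subclass of LSTMBlockWrapper); all shapes are hypothetical:

    import tensorflow as tf

    time_len, batch_size, input_size, num_units = 20, 4, 8, 16

    # A single time-major 3-D tensor, not a list of per-step tensors.
    inputs = tf.placeholder(tf.float32, [time_len, batch_size, input_size])

    cell = tf.contrib.rnn.LSTMBlockFusedCell(num_units)
    # With no initial_state given, dtype is required so a zero state
    # can be created, as the docstring above describes.
    outputs, final_state = cell(inputs, dtype=tf.float32)
    # outputs: [time_len, batch_size, num_units];
    # final_state: a (final_cell_state, final_output) pair.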
diff --git a/tensorflow/contrib/rnn/python/ops/rnn_cell.py b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
index 73f2607d84..2f6ae9f367 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn_cell.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
@@ -534,7 +534,7 @@ class GridLSTMCell(rnn_cell_impl.RNNCell):
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
- matrix. If > 1,the weight matrix is stored across num_unit_shards.
+ matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
@@ -993,7 +993,7 @@ class BidirectionalGridLSTMCell(GridLSTMCell):
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
- matrix. If > 1,the weight matrix is stored across num_unit_shards.
+ matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
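
For context, a construction sketch for the parameters described above; every value here is hypothetical, and only num_unit_shards and forget_bias correspond to the corrected docstring lines:

    import tensorflow as tf

    cell = tf.contrib.rnn.GridLSTMCell(
        num_units=64,
        num_unit_shards=2,         # weight matrix stored across 2 shards
        forget_bias=1.0,           # initial bias of the forget gates
        feature_size=8,
        frequency_skip=1,
        num_frequency_blocks=[1])  # required by this cell; value hypothetical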
@@ -2133,7 +2133,7 @@ class Conv1DLSTMCell(ConvLSTMCell):
def __init__(self, name="conv_1d_lstm_cell", **kwargs):
"""Construct Conv1DLSTM. See `ConvLSTMCell` for more details."""
- super(Conv1DLSTMCell, self).__init__(conv_ndims=1, **kwargs)
+ super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
@@ -2144,7 +2144,7 @@ class Conv2DLSTMCell(ConvLSTMCell):
def __init__(self, name="conv_2d_lstm_cell", **kwargs):
"""Construct Conv2DLSTM. See `ConvLSTMCell` for more details."""
- super(Conv2DLSTMCell, self).__init__(conv_ndims=2, **kwargs)
+ super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
@@ -2155,7 +2155,7 @@ class Conv3DLSTMCell(ConvLSTMCell):
def __init__(self, name="conv_3d_lstm_cell", **kwargs):
"""Construct Conv3DLSTM. See `ConvLSTMCell` for more details."""
- super(Conv3DLSTMCell, self).__init__(conv_ndims=3, **kwargs)
+ super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)
def _conv(args, filter_size, num_features, bias, bias_start=0.0):
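
The effect of the three name=name fixes above, sketched with hypothetical shapes: previously the explicit name parameter swallowed the keyword without forwarding it, so the parent ConvLSTMCell always fell back to its own default name; after the fix, the requested name reaches the base layer and its variable scope.

    import tensorflow as tf

    cell = tf.contrib.rnn.Conv1DLSTMCell(
        input_shape=[10, 3],     # [length, channels], hypothetical
        output_channels=4,
        kernel_shape=[3],
        name="my_conv_lstm")

    inputs = tf.placeholder(tf.float32, [2, 10, 3])  # [batch, length, channels]
    state = cell.zero_state(2, tf.float32)
    output, new_state = cell(inputs, state)
    # With the fix, the cell's variables live under "my_conv_lstm";
    # before it, the name keyword was silently dropped.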