author    Yifei Feng <yifeif@google.com> 2018-01-26 16:53:59 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-01-26 16:59:01 -0800
commit aee7f95a027accc94f1f9130f0cfaecd9399bc1d (patch)
tree   6b8484915bf631f18b2fa0561a73549d9bf19fad /tensorflow/contrib/rnn
parent e95537708f070a98607393a8f60bc61f1611a77b (diff)
Add C0301 line-too-long error to pylint sanity check.
PiperOrigin-RevId: 183467186
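
The hunks below are the mechanical line-wrapping needed to satisfy the newly enabled check. As a minimal sketch (not the sanity-check script itself, which lives outside this diff), the same C0301 check can be reproduced locally through pylint's Python API, assuming a pylint 1.x/2.x install where pylint.lint.Run accepts an exit flag:

    from pylint import lint

    # Enable only C0301 (line-too-long); 80 columns matches the style the
    # reformatted hunks below conform to. exit=False keeps Run from calling
    # sys.exit, so the check can be embedded in a larger script.
    lint.Run(
        [
            "--disable=all",
            "--enable=C0301",
            "--max-line-length=80",
            "tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py",
        ],
        exit=False)

Equivalently, from the command line: pylint --disable=all --enable=C0301 --max-line-length=80 <file>.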
Diffstat (limited to 'tensorflow/contrib/rnn')
-rw-r--r--  tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py | 389
1 file changed, 221 insertions(+), 168 deletions(-)
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py b/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
index 0258d7202d..57521c6a9b 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
@@ -45,6 +45,7 @@ from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
+
class Plus1RNNCell(rnn_lib.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@@ -160,8 +161,7 @@ class RNNTest(test.TestCase):
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
@@ -178,10 +178,9 @@ class RNNTest(test.TestCase):
self.assertAllClose(v, input_value + 1.0)
# Final state
- self.assertAllClose(
- values[-1],
- max_length * np.ones(
- (batch_size, input_size), dtype=np.float32))
+ self.assertAllClose(values[-1],
+ max_length * np.ones(
+ (batch_size, input_size), dtype=np.float32))
def testDropout(self):
cell = Plus1RNNCell()
@@ -191,8 +190,7 @@ class RNNTest(test.TestCase):
input_size = 5
max_length = 8
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
@@ -207,8 +205,10 @@ class RNNTest(test.TestCase):
with self.test_session(use_gpu=True) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
- full_dropout_values = sess.run(dropped_outputs,
- feed_dict={inputs[0]: input_value})
+ full_dropout_values = sess.run(
+ dropped_outputs, feed_dict={
+ inputs[0]: input_value
+ })
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
@@ -222,8 +222,7 @@ class RNNTest(test.TestCase):
input_size = 5
max_length = 8
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("drop_scope"):
dynamic_outputs, dynamic_state = rnn.static_rnn(
@@ -234,12 +233,16 @@ class RNNTest(test.TestCase):
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(
dynamic_outputs,
- feed_dict={inputs[0]: input_value,
- sequence_length: [2, 3]})
+ feed_dict={
+ inputs[0]: input_value,
+ sequence_length: [2, 3]
+ })
dynamic_state_value = sess.run(
[dynamic_state],
- feed_dict={inputs[0]: input_value,
- sequence_length: [2, 3]})
+ feed_dict={
+ inputs[0]: input_value,
+ sequence_length: [2, 3]
+ })
# outputs are fully calculated for t = 0, 1
for v in dynamic_values[:2]:
@@ -289,8 +292,7 @@ class RNNTest(test.TestCase):
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)
@@ -316,8 +318,7 @@ class LSTMTest(test.TestCase):
cell = rnn_cell.LSTMCell(
num_units, initializer=initializer, state_is_tuple=False)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
@@ -343,8 +344,7 @@ class LSTMTest(test.TestCase):
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
@@ -374,8 +374,7 @@ class LSTMTest(test.TestCase):
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
@@ -388,7 +387,9 @@ class LSTMTest(test.TestCase):
input_value = np.random.randn(batch_size, input_size)
(last_state_value, saved_state_value) = sess.run(
[state, state_saver.saved_state["save_lstm"]],
- feed_dict={inputs[0]: input_value})
+ feed_dict={
+ inputs[0]: input_value
+ })
self.assertAllEqual(last_state_value, saved_state_value)
def testNoProjNoShardingTupleStateSaver(self):
@@ -406,8 +407,7 @@ class LSTMTest(test.TestCase):
initializer=initializer,
state_is_tuple=True)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
@@ -420,7 +420,9 @@ class LSTMTest(test.TestCase):
input_value = np.random.randn(batch_size, input_size)
last_and_saved_states = sess.run(
state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
- feed_dict={inputs[0]: input_value})
+ feed_dict={
+ inputs[0]: input_value
+ })
self.assertEqual(4, len(last_and_saved_states))
self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
@@ -432,16 +434,17 @@ class LSTMTest(test.TestCase):
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
- state_saver = TestStateSaver(batch_size, {
- "c0": num_units,
- "m0": num_units,
- "c1": num_units + 1,
- "m1": num_units + 1,
- "c2": num_units + 2,
- "m2": num_units + 2,
- "c3": num_units + 3,
- "m3": num_units + 3
- })
+ state_saver = TestStateSaver(
+ batch_size, {
+ "c0": num_units,
+ "m0": num_units,
+ "c1": num_units + 1,
+ "m1": num_units + 1,
+ "c2": num_units + 2,
+ "m2": num_units + 2,
+ "c3": num_units + 3,
+ "m3": num_units + 3
+ })
def _cell(i):
return rnn_cell.LSTMCell(
@@ -459,8 +462,7 @@ class LSTMTest(test.TestCase):
self.assertEqual(len(cell.state_size[i]), 2)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3"))
@@ -475,10 +477,15 @@ class LSTMTest(test.TestCase):
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
- last_states = sess.run(list(nest.flatten(state)),
- feed_dict={inputs[0]: input_value})
- saved_states = sess.run(list(state_saver.saved_state.values()),
- feed_dict={inputs[0]: input_value})
+ last_states = sess.run(
+ list(nest.flatten(state)), feed_dict={
+ inputs[0]: input_value
+ })
+ saved_states = sess.run(
+ list(state_saver.saved_state.values()),
+ feed_dict={
+ inputs[0]: input_value
+ })
self.assertEqual(8, len(last_states))
self.assertEqual(8, len(saved_states))
flat_state_names = nest.flatten(state_names)
@@ -499,8 +506,7 @@ class LSTMTest(test.TestCase):
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
@@ -526,8 +532,7 @@ class LSTMTest(test.TestCase):
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell_notuple = rnn_cell.LSTMCell(
num_units,
@@ -569,14 +574,20 @@ class LSTMTest(test.TestCase):
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
- outputs_notuple_v = sess.run(outputs_notuple,
- feed_dict={inputs[0]: input_value})
- outputs_tuple_v = sess.run(outputs_tuple,
- feed_dict={inputs[0]: input_value})
+ outputs_notuple_v = sess.run(
+ outputs_notuple, feed_dict={
+ inputs[0]: input_value
+ })
+ outputs_tuple_v = sess.run(
+ outputs_tuple, feed_dict={
+ inputs[0]: input_value
+ })
self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
- (state_notuple_v,) = sess.run((state_notuple,),
- feed_dict={inputs[0]: input_value})
+ (state_notuple_v,) = sess.run(
+ (state_notuple,), feed_dict={
+ inputs[0]: input_value
+ })
state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
@@ -593,8 +604,7 @@ class LSTMTest(test.TestCase):
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
@@ -625,8 +635,7 @@ class LSTMTest(test.TestCase):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float64, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
@@ -661,8 +670,7 @@ class LSTMTest(test.TestCase):
max_length = 8
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
initializer = init_ops.constant_initializer(0.001)
@@ -721,8 +729,7 @@ class LSTMTest(test.TestCase):
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float64, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
@@ -743,16 +750,21 @@ class LSTMTest(test.TestCase):
self.assertEqual(len(outputs), len(inputs))
- variables_lib.global_variables_initializer().run(
- feed_dict={sequence_length: [2, 3]})
+ variables_lib.global_variables_initializer().run(feed_dict={
+ sequence_length: [2, 3]
+ })
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(
- outputs, feed_dict={inputs[0]: input_value,
- sequence_length: [2, 3]})
+ outputs, feed_dict={
+ inputs[0]: input_value,
+ sequence_length: [2, 3]
+ })
state_value = sess.run(
- [state], feed_dict={inputs[0]: input_value,
- sequence_length: [2, 3]})
+ [state], feed_dict={
+ inputs[0]: input_value,
+ sequence_length: [2, 3]
+ })
self.assertEqual(values[0].dtype, input_value.dtype)
self.assertEqual(state_value[0].dtype, input_value.dtype)
@@ -767,8 +779,7 @@ class LSTMTest(test.TestCase):
initializer_d = init_ops.random_uniform_initializer(
-1, 1, seed=self._seed + 1)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
@@ -792,8 +803,10 @@ class LSTMTest(test.TestCase):
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
- output_values = sess.run(outputs0 + outputs1 + outputs2,
- feed_dict={inputs[0]: input_value})
+ output_values = sess.run(
+ outputs0 + outputs1 + outputs2, feed_dict={
+ inputs[0]: input_value
+ })
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:2 * max_length]
outputs2_values = output_values[2 * max_length:]
@@ -814,8 +827,7 @@ class LSTMTest(test.TestCase):
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
@@ -833,8 +845,10 @@ class LSTMTest(test.TestCase):
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
- output_values = sess.run(outputs0 + outputs1,
- feed_dict={inputs[0]: input_value})
+ output_values = sess.run(
+ outputs0 + outputs1, feed_dict={
+ inputs[0]: input_value
+ })
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
@@ -861,8 +875,7 @@ class LSTMTest(test.TestCase):
-0.01, 0.01, seed=self._seed)
if in_graph_mode:
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
else:
inputs = max_length * [
@@ -939,8 +952,7 @@ class LSTMTest(test.TestCase):
-0.01, 0.01, seed=self._seed)
if in_graph_mode:
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
else:
inputs = max_length * [
@@ -1100,8 +1112,8 @@ class LSTMTest(test.TestCase):
# Test gradients to inputs and variables w.r.t. outputs & final state
static_grad_values = sess.run(static_gradients, feed_dict=feeds)
- static_individual_grad_values = sess.run(static_individual_gradients,
- feed_dict=feeds)
+ static_individual_grad_values = sess.run(
+ static_individual_gradients, feed_dict=feeds)
static_individual_var_grad_values = sess.run(
static_individual_variable_gradients, feed_dict=feeds)
@@ -1148,8 +1160,10 @@ class LSTMTest(test.TestCase):
# Generate gradients of several individual outputs w.r.t. inputs
dynamic_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
- for y in
- [split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic]
+ for y in [
+ split_outputs_dynamic[0], split_outputs_dynamic[-1],
+ state_dynamic
+ ]
])
# Generate gradients of individual variables w.r.t. inputs
@@ -1159,8 +1173,10 @@ class LSTMTest(test.TestCase):
"Count of trainable variables: %d" % len(trainable_variables))
dynamic_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
- for y in
- [split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic]
+ for y in [
+ split_outputs_dynamic[0], split_outputs_dynamic[-1],
+ state_dynamic
+ ]
])
# Test forward pass
@@ -1170,8 +1186,8 @@ class LSTMTest(test.TestCase):
# Test gradients to inputs and variables w.r.t. outputs & final state
dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
- dynamic_individual_grad_values = sess.run(dynamic_individual_gradients,
- feed_dict=feeds)
+ dynamic_individual_grad_values = sess.run(
+ dynamic_individual_gradients, feed_dict=feeds)
dynamic_individual_var_grad_values = sess.run(
dynamic_individual_variable_gradients, feed_dict=feeds)
@@ -1207,8 +1223,8 @@ class LSTMTest(test.TestCase):
for i, (a, b) in enumerate(
zip(static_individual_var_grad_values,
dynamic_individual_var_grad_values)):
- tf_logging.info("Comparing individual variable gradients iteration %d" %
- i)
+ tf_logging.info(
+ "Comparing individual variable gradients iteration %d" % i)
self.assertAllEqual(a, b)
@test_util.run_in_graph_and_eager_modes()
@@ -1223,10 +1239,7 @@ class BidirectionalRNNTest(test.TestCase):
self._seed = 23489
np.random.seed(self._seed)
- def _createBidirectionalRNN(self,
- use_shape,
- use_sequence_length,
- scope=None):
+ def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):
num_units = 3
input_size = 5
batch_size = 2
@@ -1270,8 +1283,10 @@ class BidirectionalRNNTest(test.TestCase):
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
- feed_dict={inputs[0]: input_value,
- sequence_length: [2, 3]})
+ feed_dict={
+ inputs[0]: input_value,
+ sequence_length: [2, 3]
+ })
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
@@ -1312,8 +1327,10 @@ class BidirectionalRNNTest(test.TestCase):
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_shape, False))
variables_lib.global_variables_initializer().run()
- out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
- feed_dict={inputs[0]: input_value})
+ out, s_fw, s_bw = sess.run(
+ [outputs, state_fw, state_bw], feed_dict={
+ inputs[0]: input_value
+ })
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
@@ -1396,13 +1413,11 @@ class BidirectionalRNNTest(test.TestCase):
use_time_major, use_sequence_length):
with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
- self._createBidirectionalDynamicRNN(use_shape,
- use_state_tuple, use_time_major,
- use_sequence_length))
+ self._createBidirectionalDynamicRNN(
+ use_shape, use_state_tuple, use_time_major, use_sequence_length))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
- feed_dict = (
- {sequence_length: [2, 3]} if use_sequence_length else {})
+ feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
feed_dict.update({inputs[0]: input_value})
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
@@ -1538,8 +1553,7 @@ class MultiDimensionalLSTMTest(test.TestCase):
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(None,) + input_size)
+ array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
]
inputs_using_dim = max_length * [
array_ops.placeholder(
@@ -1585,14 +1599,22 @@ class MultiDimensionalLSTMTest(test.TestCase):
input_total_size = (batch_size,) + input_size
input_value = np.random.randn(*input_total_size)
- outputs_static_v = sess.run(outputs_static,
- feed_dict={inputs[0]: input_value})
- outputs_dynamic_v = sess.run(outputs_dynamic,
- feed_dict={inputs[0]: input_value})
- outputs_bid_v = sess.run(outputs_bid,
- feed_dict={inputs_using_dim[0]: input_value})
- outputs_sav_v = sess.run(outputs_sav,
- feed_dict={inputs_using_dim[0]: input_value})
+ outputs_static_v = sess.run(
+ outputs_static, feed_dict={
+ inputs[0]: input_value
+ })
+ outputs_dynamic_v = sess.run(
+ outputs_dynamic, feed_dict={
+ inputs[0]: input_value
+ })
+ outputs_bid_v = sess.run(
+ outputs_bid, feed_dict={
+ inputs_using_dim[0]: input_value
+ })
+ outputs_sav_v = sess.run(
+ outputs_sav, feed_dict={
+ inputs_using_dim[0]: input_value
+ })
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
self.assertAllEqual(outputs_static_v, outputs_sav_v)
@@ -1602,16 +1624,26 @@ class MultiDimensionalLSTMTest(test.TestCase):
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
- state_static_v = sess.run(state_static,
- feed_dict={inputs[0]: input_value})
- state_dynamic_v = sess.run(state_dynamic,
- feed_dict={inputs[0]: input_value})
- state_bid_fw_v = sess.run(state_fw,
- feed_dict={inputs_using_dim[0]: input_value})
- state_bid_bw_v = sess.run(state_bw,
- feed_dict={inputs_using_dim[0]: input_value})
- state_sav_v = sess.run(state_sav,
- feed_dict={inputs_using_dim[0]: input_value})
+ state_static_v = sess.run(
+ state_static, feed_dict={
+ inputs[0]: input_value
+ })
+ state_dynamic_v = sess.run(
+ state_dynamic, feed_dict={
+ inputs[0]: input_value
+ })
+ state_bid_fw_v = sess.run(
+ state_fw, feed_dict={
+ inputs_using_dim[0]: input_value
+ })
+ state_bid_bw_v = sess.run(
+ state_bw, feed_dict={
+ inputs_using_dim[0]: input_value
+ })
+ state_sav_v = sess.run(
+ state_sav, feed_dict={
+ inputs_using_dim[0]: input_value
+ })
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
@@ -1633,16 +1665,17 @@ class NestedLSTMTest(test.TestCase):
with self.test_session(graph=ops_lib.Graph()) as sess:
state_saver = TestStateSaver(batch_size, state_size)
single_input = (array_ops.placeholder(
- dtypes.float32, shape=(None, input_size)), array_ops.placeholder(
- dtypes.float32, shape=(None, input_size)))
+ dtypes.float32, shape=(None, input_size)),
+ array_ops.placeholder(
+ dtypes.float32, shape=(None, input_size)))
inputs = max_length * [single_input]
inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
array_ops.stack([input_[1] for input_ in inputs]))
- single_input_using_dim = (
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size)),
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size)))
+ single_input_using_dim = (array_ops.placeholder(
+ dtypes.float32, shape=(batch_size, input_size)),
+ array_ops.placeholder(
+ dtypes.float32,
+ shape=(batch_size, input_size)))
inputs_using_dim = max_length * [single_input_using_dim]
# Create a cell for the whole test. This is fine because the cell has no
@@ -1688,14 +1721,22 @@ class NestedLSTMTest(test.TestCase):
input_total_size = (batch_size, input_size)
input_value = (np.random.randn(*input_total_size),
np.random.randn(*input_total_size))
- outputs_dynamic_v = sess.run(outputs_dynamic,
- feed_dict={single_input: input_value})
- outputs_static_v = sess.run(outputs_static,
- feed_dict={single_input: input_value})
- outputs_sav_v = sess.run(outputs_sav,
- feed_dict={single_input_using_dim: input_value})
- outputs_bid_v = sess.run(outputs_bid,
- feed_dict={single_input_using_dim: input_value})
+ outputs_dynamic_v = sess.run(
+ outputs_dynamic, feed_dict={
+ single_input: input_value
+ })
+ outputs_static_v = sess.run(
+ outputs_static, feed_dict={
+ single_input: input_value
+ })
+ outputs_sav_v = sess.run(
+ outputs_sav, feed_dict={
+ single_input_using_dim: input_value
+ })
+ outputs_bid_v = sess.run(
+ outputs_bid, feed_dict={
+ single_input_using_dim: input_value
+ })
self.assertAllEqual(outputs_static_v,
np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
@@ -1706,16 +1747,26 @@ class NestedLSTMTest(test.TestCase):
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
- state_dynamic_v = sess.run(state_dynamic,
- feed_dict={single_input: input_value})
- state_static_v = sess.run(state_static,
- feed_dict={single_input: input_value})
- state_bid_fw_v = sess.run(state_fw,
- feed_dict={single_input_using_dim: input_value})
- state_bid_bw_v = sess.run(state_bw,
- feed_dict={single_input_using_dim: input_value})
- state_sav_v = sess.run(state_sav,
- feed_dict={single_input_using_dim: input_value})
+ state_dynamic_v = sess.run(
+ state_dynamic, feed_dict={
+ single_input: input_value
+ })
+ state_static_v = sess.run(
+ state_static, feed_dict={
+ single_input: input_value
+ })
+ state_bid_fw_v = sess.run(
+ state_fw, feed_dict={
+ single_input_using_dim: input_value
+ })
+ state_bid_bw_v = sess.run(
+ state_bw, feed_dict={
+ single_input_using_dim: input_value
+ })
+ state_sav_v = sess.run(
+ state_sav, feed_dict={
+ single_input_using_dim: input_value
+ })
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
@@ -1764,8 +1815,7 @@ class StateSaverRNNTest(test.TestCase):
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
- array_ops.placeholder(
- dtypes.float32, shape=(batch_size, input_size))
+ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
]
return rnn.static_state_saving_rnn(
cell,
@@ -1931,8 +1981,10 @@ class RawRNNTest(test.TestCase):
(outputs_val, outputs_dynamic_rnn_val, final_state_val,
final_state_dynamic_rnn_val) = sess.run(
[outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
- feed_dict={inputs: rand_input,
- sequence_length: rand_seq_len})
+ feed_dict={
+ inputs: rand_input,
+ sequence_length: rand_seq_len
+ })
self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
@@ -1945,12 +1997,16 @@ class RawRNNTest(test.TestCase):
self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
gradients_val = sess.run(
gradients,
- feed_dict={inputs: rand_input,
- sequence_length: rand_seq_len})
+ feed_dict={
+ inputs: rand_input,
+ sequence_length: rand_seq_len
+ })
gradients_dynamic_rnn_val = sess.run(
gradients_dynamic_rnn,
- feed_dict={inputs: rand_input,
- sequence_length: rand_seq_len})
+ feed_dict={
+ inputs: rand_input,
+ sequence_length: rand_seq_len
+ })
self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
input_gradients_val = gradients_val[0]
input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
@@ -2067,14 +2123,13 @@ class RawRNNTest(test.TestCase):
def loop_fn(time_, cell_output, cell_state, _):
if cell_output is None:
- emit_output = (array_ops.zeros(
- [2, 3], dtype=dtypes.int32), array_ops.zeros(
- [unknown_dim], dtype=dtypes.int64))
+ emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),
+ array_ops.zeros([unknown_dim], dtype=dtypes.int64))
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
- emit_output = (array_ops.ones(
- [batch_size, 2, 3], dtype=dtypes.int32), array_ops.ones(
- [batch_size, unknown_dim], dtype=dtypes.int64))
+ emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),
+ array_ops.ones(
+ [batch_size, unknown_dim], dtype=dtypes.int64))
next_state = cell_state
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
@@ -2193,8 +2248,8 @@ class TensorArrayOnCorrectDeviceTest(test.TestCase):
cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
gpu_cell = DeviceWrapperCell(cell, cell_device)
- inputs = np.random.randn(batch_size, time_steps,
- input_size).astype(np.float32)
+ inputs = np.random.randn(batch_size, time_steps, input_size).astype(
+ np.float32)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
if input_device is not None:
@@ -2262,8 +2317,7 @@ class TensorArrayOnCorrectDeviceTest(test.TestCase):
gpu_dev = test.gpu_device_name()
run_metadata = self._execute_rnn_on(
- rnn_device="/cpu:0", cell_device="/cpu:0",
- input_device=gpu_dev)
+ rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):
@@ -2278,8 +2332,7 @@ class TensorArrayOnCorrectDeviceTest(test.TestCase):
return # Test requires access to a GPU
gpu_dev = test.gpu_device_name()
- run_metadata = self._execute_rnn_on(
- input_device=gpu_dev)
+ run_metadata = self._execute_rnn_on(input_device=gpu_dev)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
def _assert_in(op_str, in_stats, out_stats):