author    Pavithra Vijay <psv@google.com>  2018-07-30 11:52:57 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-07-30 11:56:57 -0700
commit    4027262f588466ee4a419c28521a5b53aad12e5a (patch)
tree      8445d665a13104ec9bbe1eed1dda1ba8ff2f36e4
parent    3328243cdca5d08f56fc64c582ce2f3b80630259 (diff)
De-dup a few eager mode tests, remove some unused functions and params.
PiperOrigin-RevId: 206621105
-rw-r--r--  tensorflow/python/keras/engine/network.py                3
-rw-r--r--  tensorflow/python/keras/engine/training_arrays.py       18
-rw-r--r--  tensorflow/python/keras/engine/training_eager.py         6
-rw-r--r--  tensorflow/python/keras/engine/training_eager_test.py  278
-rw-r--r--  tensorflow/python/keras/engine/training_test.py         30
5 files changed, 26 insertions(+), 309 deletions(-)
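
The bulk of this commit's de-duplication is visible in the training_test.py diff below: instead of keeping a parallel copy of each test in training_eager_test.py, the shared graph-mode tests are decorated with @tf_test_util.run_in_graph_and_eager_modes so a single body covers both execution modes. A minimal sketch of that pattern, with a hypothetical test class and body:

from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.platform import test


class ExampleDedupTest(test.TestCase):

  @tf_test_util.run_in_graph_and_eager_modes
  def test_shared_body(self):
    # The body runs twice: once while building a graph, once executing
    # eagerly, replacing the need for a duplicate eager-only test.
    self.assertEqual(1 + 1, 2)


if __name__ == '__main__':
  test.main()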
diff --git a/tensorflow/python/keras/engine/network.py b/tensorflow/python/keras/engine/network.py
index d9b031d080..20a29dbf20 100644
--- a/tensorflow/python/keras/engine/network.py
+++ b/tensorflow/python/keras/engine/network.py
@@ -967,7 +967,7 @@ class Network(base_layer.Layer):
mask: List of masks (tensors or None).
Returns:
- Three lists: output_tensors, output_masks, output_shapes
+ Two lists: output_tensors, output_masks
"""
# Note: masking support is relevant mainly for Keras.
# It cannot be factored out without having to fully reimplement the network
# calling logic.
@@ -1034,7 +1034,6 @@ class Network(base_layer.Layer):
else:
output_masks = [None for _ in output_tensors]
computed_tensors = [computed_tensor]
- computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
diff --git a/tensorflow/python/keras/engine/training_arrays.py b/tensorflow/python/keras/engine/training_arrays.py
index adefffab11..6572e2c344 100644
--- a/tensorflow/python/keras/engine/training_arrays.py
+++ b/tensorflow/python/keras/engine/training_arrays.py
@@ -50,7 +50,6 @@ def fit_loop(model,
val_targets=None,
val_sample_weights=None,
shuffle=True,
- callback_metrics=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
@@ -69,8 +68,6 @@ def fit_loop(model,
val_targets: List of target arrays.
val_sample_weights: Optional list of sample weight arrays.
shuffle: Whether to shuffle the data at the beginning of each epoch
- callback_metrics: List of strings, the display names of the metrics
- passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
initial_epoch: Epoch at which to start training
@@ -121,9 +118,7 @@ def fit_loop(model,
out_labels = model.metrics_names
if do_validation:
- callback_metrics = copy.copy(out_labels) + [
- 'val_' + n for n in out_labels
- ]
+ callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
# need to create the test_function before start of the first epoch
# because TensorBoard callback on_epoch_begin adds summary to the
# list of fetches of the test_function
@@ -197,9 +192,7 @@ def fit_loop(model,
if steps_per_epoch is not None:
# Step-wise fit loop.
for step_index in range(steps_per_epoch):
- batch_logs = {}
- batch_logs['batch'] = step_index
- batch_logs['size'] = 1
+ batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
outs = f(ins)
@@ -388,7 +381,9 @@ def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
return outs
-def test_loop(model, inputs, targets,
+def test_loop(model,
+ inputs,
+ targets,
sample_weights=None,
batch_size=None,
verbose=0,
@@ -485,8 +480,7 @@ def test_loop(model, inputs, targets,
if isinstance(batch_outs, list):
if batch_index == 0:
- for batch_out in enumerate(batch_outs):
- outs.append(0.)
+ outs.extend([0.] * len(batch_outs))
for i, batch_out in enumerate(batch_outs):
if i in stateful_metric_indices:
outs[i] = batch_out
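
The last hunk above also fixes an unidiomatic padding loop: the removed code iterated over enumerate(batch_outs) while ignoring the loop variable entirely, appending one zero per element. A standalone plain-Python sketch (values hypothetical) showing the old and new forms produce the same padding:

batch_outs = [0.25, 0.5, 0.75]  # hypothetical per-batch outputs

# Old form: the enumerate() pairs are never used; one 0. per element.
outs_old = []
for _ in enumerate(batch_outs):
  outs_old.append(0.)

# New form: same result, intent stated directly.
outs_new = []
outs_new.extend([0.] * len(batch_outs))

assert outs_old == outs_new == [0., 0., 0.]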
diff --git a/tensorflow/python/keras/engine/training_eager.py b/tensorflow/python/keras/engine/training_eager.py
index b1149ab5b5..0b25b827ad 100644
--- a/tensorflow/python/keras/engine/training_eager.py
+++ b/tensorflow/python/keras/engine/training_eager.py
@@ -589,7 +589,6 @@ def fit_loop(model,
verbose=1,
callbacks=None,
shuffle=True,
- callback_metrics=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
@@ -611,10 +610,6 @@ def fit_loop(model,
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
shuffle: Whether to shuffle the data at the beginning of each epoch
- callback_metrics: List of strings, the display names of the metrics
- passed to the callbacks. They should be the
- concatenation of list the display names of the outputs of
- `f` and the list of display names of the outputs of `f_val`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
@@ -646,6 +641,7 @@ def fit_loop(model,
num_train_samples = None
out_labels = None
+ callback_metrics = None
if model._is_compiled:
out_labels = model.metrics_names
if do_validation:
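
The hunk above completes the removal of callback_metrics as a fit_loop parameter: it is now a local, initialized to None and derived from the compiled model's metric names. A hypothetical, self-contained sketch of the before/after shape of that change (the stub model and function bodies are illustrative only):

# Before: callers threaded callback_metrics through as a parameter.
def fit_loop_before(model, callback_metrics=None):
  return callback_metrics

# After: the loop derives the display names itself when the model is compiled.
def fit_loop_after(model, do_validation=False):
  callback_metrics = None
  if model._is_compiled:
    out_labels = model.metrics_names
    callback_metrics = list(out_labels)
    if do_validation:
      callback_metrics += ['val_' + n for n in out_labels]
  return callback_metrics

class _StubModel(object):
  _is_compiled = True
  metrics_names = ['loss', 'mae']

print(fit_loop_after(_StubModel(), do_validation=True))
# -> ['loss', 'mae', 'val_loss', 'val_mae']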
diff --git a/tensorflow/python/keras/engine/training_eager_test.py b/tensorflow/python/keras/engine/training_eager_test.py
index bdb3035129..b0f57f0770 100644
--- a/tensorflow/python/keras/engine/training_eager_test.py
+++ b/tensorflow/python/keras/engine/training_eager_test.py
@@ -31,284 +31,6 @@ from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TrainingTest(test.TestCase):
- def test_fit_on_arrays(self):
- a = keras.layers.Input(shape=(3,), name='input_a')
- b = keras.layers.Input(shape=(3,), name='input_b')
-
- dense = keras.layers.Dense(4, name='dense')
- c = dense(a)
- d = dense(b)
- e = keras.layers.Dropout(0.5, name='dropout')(c)
-
- model = keras.models.Model([a, b], [d, e])
-
- optimizer = RMSPropOptimizer(learning_rate=0.001)
- loss = 'mse'
- loss_weights = [1., 0.5]
- metrics = ['mae']
- model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
-
- input_a_np = np.random.random((10, 3))
- input_b_np = np.random.random((10, 3))
-
- output_d_np = np.random.random((10, 4))
- output_e_np = np.random.random((10, 4))
-
- # Test fit at different verbosity
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=1,
- batch_size=5,
- verbose=0)
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=1,
- batch_size=5,
- verbose=1)
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=2,
- batch_size=5,
- verbose=2)
-
- # Test with validation data
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- validation_data=([input_a_np, input_b_np], [output_d_np,
- output_e_np]),
- epochs=1,
- batch_size=5,
- verbose=0)
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- validation_data=([input_a_np, input_b_np], [output_d_np,
- output_e_np]),
- epochs=2,
- batch_size=5,
- verbose=1)
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- validation_data=([input_a_np, input_b_np], [output_d_np,
- output_e_np]),
- epochs=2,
- batch_size=5,
- verbose=2)
- model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
-
- # Test with validation split
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=2,
- batch_size=5,
- verbose=0,
- validation_split=0.2)
-
- # Test with dictionary inputs
- model.fit(
- {
- 'input_a': input_a_np,
- 'input_b': input_b_np
- }, {'dense': output_d_np,
- 'dropout': output_e_np},
- epochs=1,
- batch_size=5,
- verbose=0)
- model.fit(
- {
- 'input_a': input_a_np,
- 'input_b': input_b_np
- }, {'dense': output_d_np,
- 'dropout': output_e_np},
- epochs=1,
- batch_size=5,
- verbose=1)
- model.fit(
- {
- 'input_a': input_a_np,
- 'input_b': input_b_np
- }, {'dense': output_d_np,
- 'dropout': output_e_np},
- validation_data=({'input_a': input_a_np,
- 'input_b': input_b_np
- },
- {
- 'dense': output_d_np,
- 'dropout': output_e_np
- }),
- epochs=1,
- batch_size=5,
- verbose=0)
- model.train_on_batch({
- 'input_a': input_a_np,
- 'input_b': input_b_np
- }, {'dense': output_d_np,
- 'dropout': output_e_np})
- # Test with lists for loss, metrics
- loss = ['mae', 'mse']
- metrics = ['acc', 'mae']
- model.compile(optimizer, loss, metrics=metrics)
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=1,
- batch_size=5,
- verbose=0)
-
- # Test with dictionaries for loss, metrics, loss weights
- loss = {'dense': 'mse', 'dropout': 'mae'}
- loss_weights = {'dense': 1., 'dropout': 0.5}
- metrics = {'dense': 'mse', 'dropout': 'mae'}
- model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=1,
- batch_size=5,
- verbose=0)
-
- # Invalid use cases
- with self.assertRaises(AttributeError):
- model.fit(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- epochs=1,
- validation_data=([input_a_np, input_b_np], 0, 0),
- verbose=0)
- with self.assertRaises(ValueError):
- model.train_on_batch({'input_a': input_a_np},
- [output_d_np, output_e_np])
- with self.assertRaises(ValueError):
- model.train_on_batch([input_a_np], [output_d_np, output_e_np])
- with self.assertRaises(AttributeError):
- model.train_on_batch(1, [output_d_np, output_e_np])
- with self.assertRaises(ValueError):
- model.train_on_batch(input_a_np, [output_d_np, output_e_np])
- with self.assertRaises(ValueError):
- bad_input = np.random.random((11, 3))
- model.train_on_batch([bad_input, input_b_np],
- [output_d_np, output_e_np])
- with self.assertRaises(ValueError):
- bad_target = np.random.random((11, 4))
- model.train_on_batch([input_a_np, input_b_np],
- [bad_target, output_e_np])
-
- # Build single-input model
- x = keras.layers.Input(shape=(3,), name='input_a')
- y = keras.layers.Dense(4)(x)
- model = keras.models.Model(x, y)
- model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
- # This will work
- model.fit([input_a_np], output_d_np, epochs=1)
- with self.assertRaises(ValueError):
- model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
-
- def test_evaluate_predict_on_arrays(self):
- a = keras.layers.Input(shape=(3,), name='input_a')
- b = keras.layers.Input(shape=(3,), name='input_b')
-
- dense = keras.layers.Dense(4, name='dense')
- c = dense(a)
- d = dense(b)
- e = keras.layers.Dropout(0.5, name='dropout')(c)
-
- model = keras.models.Model([a, b], [d, e])
-
- optimizer = RMSPropOptimizer(learning_rate=0.001)
- loss = 'mse'
- loss_weights = [1., 0.5]
- metrics = ['acc', 'mae']
- model.compile(
- optimizer,
- loss,
- metrics=metrics,
- loss_weights=loss_weights,
- sample_weight_mode=None)
-
- input_a_np = np.random.random((10, 3))
- input_b_np = np.random.random((10, 3))
-
- output_d_np = np.random.random((10, 4))
- output_e_np = np.random.random((10, 4))
-
- # Test evaluate at different verbosity
- out = model.evaluate(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- batch_size=5,
- verbose=0)
- self.assertEqual(len(out), 7)
- out = model.evaluate(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- batch_size=5,
- verbose=1)
- self.assertEqual(len(out), 7)
- out = model.evaluate(
- [input_a_np, input_b_np], [output_d_np, output_e_np],
- batch_size=5,
- verbose=2)
- self.assertEqual(len(out), 7)
- out = model.test_on_batch([input_a_np, input_b_np],
- [output_d_np, output_e_np])
- self.assertEqual(len(out), 7)
-
- # Test evaluate with dictionary inputs
- model.evaluate(
- {
- 'input_a': input_a_np,
- 'input_b': input_b_np
- }, {'dense': output_d_np,
- 'dropout': output_e_np},
- batch_size=5,
- verbose=0)
- model.evaluate(
- {
- 'input_a': input_a_np,
- 'input_b': input_b_np
- }, {'dense': output_d_np,
- 'dropout': output_e_np},
- batch_size=5,
- verbose=1)
-
- # Test predict
- out = model.predict([input_a_np, input_b_np], batch_size=5)
- self.assertEqual(len(out), 2)
- out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
- self.assertEqual(len(out), 2)
- out = model.predict_on_batch({
- 'input_a': input_a_np,
- 'input_b': input_b_np
- })
- self.assertEqual(len(out), 2)
-
- def test_invalid_loss_or_metrics(self):
- num_classes = 5
- train_samples = 1000
- test_samples = 1000
- input_dim = 5
-
- model = keras.models.Sequential()
- model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
- model.add(keras.layers.Activation('relu'))
- model.add(keras.layers.Dense(num_classes))
- model.add(keras.layers.Activation('softmax'))
- model.compile(loss='categorical_crossentropy',
- optimizer=RMSPropOptimizer(learning_rate=0.001))
- np.random.seed(1337)
-
- (x_train, y_train), (_, _) = testing_utils.get_test_data(
- train_samples=train_samples,
- test_samples=test_samples,
- input_shape=(input_dim,),
- num_classes=num_classes)
-
- with self.assertRaises(ValueError):
- model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
-
- with self.assertRaises(TypeError):
- model.compile(loss='categorical_crossentropy',
- optimizer=RMSPropOptimizer(learning_rate=0.001),
- metrics=set(0))
-
- with self.assertRaises(ValueError):
- model.compile(loss=None,
- optimizer='rms')
-
def test_model_methods_with_eager_tensors_multi_io(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
diff --git a/tensorflow/python/keras/engine/training_test.py b/tensorflow/python/keras/engine/training_test.py
index 129441d159..be9b0a21d7 100644
--- a/tensorflow/python/keras/engine/training_test.py
+++ b/tensorflow/python/keras/engine/training_test.py
@@ -26,6 +26,7 @@ import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
@@ -45,6 +46,7 @@ except ImportError:
class TrainingTest(test.TestCase):
+ @tf_test_util.run_in_graph_and_eager_modes
def test_fit_on_arrays(self):
with self.test_session():
a = keras.layers.Input(shape=(3,), name='input_a')
@@ -57,7 +59,7 @@ class TrainingTest(test.TestCase):
model = keras.models.Model([a, b], [d, e])
- optimizer = 'rmsprop'
+ optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
@@ -224,7 +226,7 @@ class TrainingTest(test.TestCase):
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
- model.compile(optimizer='rmsprop', loss='mse')
+ model.compile(optimizer, loss='mse')
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
with self.assertRaises(ValueError):
@@ -240,6 +242,7 @@ class TrainingTest(test.TestCase):
batch_size=5,
verbose=2)
+ @tf_test_util.run_in_graph_and_eager_modes
def test_evaluate_predict_on_arrays(self):
with self.test_session():
a = keras.layers.Input(shape=(3,), name='input_a')
@@ -252,7 +255,7 @@ class TrainingTest(test.TestCase):
model = keras.models.Model([a, b], [d, e])
- optimizer = 'rmsprop'
+ optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
@@ -322,6 +325,7 @@ class TrainingTest(test.TestCase):
})
self.assertEqual(len(out), 2)
+ @tf_test_util.run_in_graph_and_eager_modes
def test_invalid_loss_or_metrics(self):
num_classes = 5
train_samples = 1000
@@ -334,27 +338,29 @@ class TrainingTest(test.TestCase):
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
- model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
+ optimizer = RMSPropOptimizer(learning_rate=0.001)
+ model.compile(optimizer, loss='categorical_crossentropy')
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
- with self.assertRaises(ValueError):
- model.fit(x_train, y_train)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
with self.assertRaises(TypeError):
- model.compile(loss='categorical_crossentropy',
- optimizer='rmsprop',
- metrics=set(0))
+ model.compile(
+ optimizer, loss='categorical_crossentropy', metrics=set(0))
- with self.assertRaises(ValueError):
- model.compile(loss=None,
- optimizer='rmsprop')
+ if not context.executing_eagerly():
+ # TODO(psv): Investigate these use cases in eager mode.
+ with self.assertRaises(ValueError):
+ model.fit(x_train, y_train)
+
+ with self.assertRaises(ValueError):
+ model.compile(optimizer, loss=None)
def test_training_on_sparse_data_with_dense_placeholders(self):
if scipy_sparse is None:
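
Finally, the new if not context.executing_eagerly(): guard in training_test.py is what lets the merged tests keep graph-only expectations: checks that currently fail under eager execution are fenced off until the TODO is resolved. A minimal sketch of the guard in isolation, assuming a TensorFlow installation that provides the same context module the diff imports:

from tensorflow.python.eager import context

# executing_eagerly() reports the current execution mode; the merged tests
# use it to skip assertions that only hold while building a graph.
if not context.executing_eagerly():
  print('graph mode: run graph-only assertions')
else:
  print('eager mode: skip graph-only assertions')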