author     Francois Chollet <fchollet@google.com>           2018-08-20 18:48:13 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-08-20 18:51:19 -0700
commit     d29759fa5370c7fe4ba5f12ac26cba08d5bb3c4f (patch)
tree       c0da6cef8ac226a78e3375f505f8039cfbc0806f
parent     c5f27df3a5375ae75657e28d5f598e98e9ab1cc7 (diff)
Simplify Keras unit tests by removing unnecessary session scopes and introducing a utility function for repeated code.
PiperOrigin-RevId: 209523944
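
For context, the pattern this change factors out built the same two-layer MLP inline in every test. A minimal before/after sketch, assuming the `tensorflow.python.keras` import path these tests use and illustrative constants in place of the test-module values; the helper itself is added to `testing_utils.py` at the bottom of this diff:

    from tensorflow.python import keras
    from tensorflow.python.keras import testing_utils

    # Before: each test assembled its own small MLP by hand.
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_dim=5, activation='relu'))
    model.add(keras.layers.Dense(2, activation='softmax'))

    # After: one shared helper builds the equivalent model in a single call.
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=2, input_dim=5)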
 tensorflow/python/keras/callbacks_test.py          |  107
 tensorflow/python/keras/engine/sequential_test.py  |   39
 tensorflow/python/keras/engine/training_test.py    | 1366
 tensorflow/python/keras/testing_utils.py           |   19
4 files changed, 736 insertions(+), 795 deletions(-)
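
The other half of the cleanup removes `with self.test_session():` wrappers from tests already decorated with `@tf_test_util.run_in_graph_and_eager_modes`, de-indenting the bodies. A minimal self-contained sketch of the resulting shape (the test class and method names are hypothetical; the pattern mirrors the hunks below):

    from tensorflow.python.framework import test_util as tf_test_util
    from tensorflow.python.keras import testing_utils
    from tensorflow.python.platform import test


    class SessionScopeCleanupTest(test.TestCase):

      @tf_test_util.run_in_graph_and_eager_modes
      def test_build_small_mlp(self):
        # Previously wrapped in `with self.test_session():`; the decorator
        # already exercises both graph and eager execution, so the explicit
        # session scope was redundant.
        model = testing_utils.get_small_sequential_mlp(num_hidden=4,
                                                       num_classes=5)
        model.build((None, 10))
        self.assertTrue(model.built)


    if __name__ == '__main__':
      test.main()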
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index e84e023384..7675a6586f 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -235,11 +235,8 @@ class KerasCallbacksTest(test.TestCase):
         num_classes=NUM_CLASSES)
     y_test = keras.utils.to_categorical(y_test)
     y_train = keras.utils.to_categorical(y_train)
-    model = keras.models.Sequential()
-    model.add(
-        keras.layers.Dense(
-            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
     model.compile(
         loss='categorical_crossentropy',
         optimizer='rmsprop',
@@ -298,9 +295,8 @@ class KerasCallbacksTest(test.TestCase):
         test_samples=50,
         input_shape=(1,),
         num_classes=NUM_CLASSES)
-    model = keras.models.Sequential((keras.layers.Dense(
-        1, input_dim=1, activation='relu'), keras.layers.Dense(
-            1, activation='sigmoid'),))
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=1, num_classes=1, input_dim=1)
     model.compile(
         optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
@@ -334,11 +330,8 @@ class KerasCallbacksTest(test.TestCase):
         num_classes=NUM_CLASSES)
     y_test = keras.utils.to_categorical(y_test)
     y_train = keras.utils.to_categorical(y_train)
-    model = keras.models.Sequential()
-    model.add(
-        keras.layers.Dense(
-            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
     model.compile(
         loss='categorical_crossentropy',
         optimizer='sgd',
@@ -388,12 +381,8 @@ class KerasCallbacksTest(test.TestCase):
     def make_model():
       random_seed.set_random_seed(1234)
       np.random.seed(1337)
-      model = keras.models.Sequential()
-      model.add(
-          keras.layers.Dense(
-              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-
+      model = testing_utils.get_small_sequential_mlp(
+          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
       model.compile(
           loss='categorical_crossentropy',
           optimizer=keras.optimizers.SGD(lr=0.1),
@@ -498,12 +487,8 @@ class KerasCallbacksTest(test.TestCase):
     def make_model():
       np.random.seed(1337)
-      model = keras.models.Sequential()
-      model.add(
-          keras.layers.Dense(
-              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-
+      model = testing_utils.get_small_sequential_mlp(
+          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
       model.compile(
           loss='categorical_crossentropy',
           optimizer=keras.optimizers.SGD(lr=0.1),
@@ -985,9 +970,8 @@ class KerasCallbacksTest(test.TestCase):
       yield x, y

     with self.test_session():
-      model = keras.models.Sequential()
-      model.add(keras.layers.Dense(10, input_dim=100, activation='relu'))
-      model.add(keras.layers.Dense(10, activation='softmax'))
+      model = testing_utils.get_small_sequential_mlp(
+          num_hidden=10, num_classes=10, input_dim=100)
       model.compile(
           loss='categorical_crossentropy',
           optimizer='sgd',
@@ -1083,11 +1067,8 @@ class KerasCallbacksTest(test.TestCase):
     y_test = keras.utils.to_categorical(y_test)
     y_train = keras.utils.to_categorical(y_train)
-    model = keras.models.Sequential()
-    model.add(
-        keras.layers.Dense(
-            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
     model.compile(
         loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
@@ -1179,40 +1160,36 @@ class KerasCallbacksTest(test.TestCase):

   @test_util.run_in_graph_and_eager_modes
   def test_Tensorboard_eager(self):
-    with self.test_session():
-      temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
-      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-
-      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-          train_samples=TRAIN_SAMPLES,
-          test_samples=TEST_SAMPLES,
-          input_shape=(INPUT_DIM,),
-          num_classes=NUM_CLASSES)
-      y_test = keras.utils.to_categorical(y_test)
-      y_train = keras.utils.to_categorical(y_train)
-
-      model = keras.models.Sequential()
-      model.add(
-          keras.layers.Dense(
-              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-      model.compile(
-          loss='binary_crossentropy',
-          optimizer=adam.AdamOptimizer(0.01),
-          metrics=['accuracy'])
-
-      cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
+    temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
+    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

-      model.fit(
-          x_train,
-          y_train,
-          batch_size=BATCH_SIZE,
-          validation_data=(x_test, y_test),
-          callbacks=cbks,
-          epochs=2,
-          verbose=0)
+    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
+        train_samples=TRAIN_SAMPLES,
+        test_samples=TEST_SAMPLES,
+        input_shape=(INPUT_DIM,),
+        num_classes=NUM_CLASSES)
+    y_test = keras.utils.to_categorical(y_test)
+    y_train = keras.utils.to_categorical(y_train)

-      self.assertTrue(os.path.exists(temp_dir))
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
+    model.compile(
+        loss='binary_crossentropy',
+        optimizer=adam.AdamOptimizer(0.01),
+        metrics=['accuracy'])
+
+    cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
+
+    model.fit(
+        x_train,
+        y_train,
+        batch_size=BATCH_SIZE,
+        validation_data=(x_test, y_test),
+        callbacks=cbks,
+        epochs=2,
+        verbose=0)
+
+    self.assertTrue(os.path.exists(temp_dir))

   def test_RemoteMonitorWithJsonPayload(self):
     if requests is None:
diff --git a/tensorflow/python/keras/engine/sequential_test.py b/tensorflow/python/keras/engine/sequential_test.py
index 3f8e120df0..28af8d61bc 100644
--- a/tensorflow/python/keras/engine/sequential_test.py
+++ b/tensorflow/python/keras/engine/sequential_test.py
@@ -25,22 +25,12 @@ from tensorflow.python import keras
 from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.eager import function
 from tensorflow.python.framework import test_util as tf_test_util
+from tensorflow.python.keras import testing_utils
 from tensorflow.python.ops import array_ops
 from tensorflow.python.platform import test
 from tensorflow.python.training import rmsprop


-def _get_small_mlp(num_hidden, num_classes, input_dim=None):
-  model = keras.models.Sequential()
-  if input_dim:
-    model.add(keras.layers.Dense(num_hidden, activation='relu',
-                                 input_dim=input_dim))
-  else:
-    model.add(keras.layers.Dense(num_hidden, activation='relu'))
-  model.add(keras.layers.Dense(num_classes, activation='softmax'))
-  return model
-
-
 class TestSequential(test.TestCase, parameterized.TestCase):
   """Most Sequential model API tests are covered in `training_test.py`.
   """
@@ -63,7 +53,8 @@ class TestSequential(test.TestCase, parameterized.TestCase):
     batch_size = 5
     num_classes = 2
-    model = _get_small_mlp(num_hidden, num_classes, input_dim)
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden, num_classes, input_dim)
     model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
     x = np.random.random((batch_size, input_dim))
     y = np.random.random((batch_size, num_classes))
@@ -94,7 +85,7 @@ class TestSequential(test.TestCase, parameterized.TestCase):
     batch_size = 5
     num_classes = 2
-    model = _get_small_mlp(num_hidden, num_classes)
+    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
     model.compile(
         loss='mse',
         optimizer=rmsprop.RMSPropOptimizer(1e-3),
@@ -118,7 +109,7 @@ class TestSequential(test.TestCase, parameterized.TestCase):
     num_samples = 50
     steps_per_epoch = 10
-    model = _get_small_mlp(num_hidden, num_classes)
+    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
     model.compile(
         loss='mse',
         optimizer=rmsprop.RMSPropOptimizer(1e-3),
@@ -145,9 +136,9 @@ class TestSequential(test.TestCase, parameterized.TestCase):

     def get_model():
       if deferred:
-        model = _get_small_mlp(10, 4)
+        model = testing_utils.get_small_sequential_mlp(10, 4)
       else:
-        model = _get_small_mlp(10, 4, input_dim=3)
+        model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3)
       model.compile(
           optimizer=rmsprop.RMSPropOptimizer(1e-3),
           loss='categorical_crossentropy',
@@ -262,7 +253,7 @@ class TestSequential(test.TestCase, parameterized.TestCase):
     batch_size = 5
     num_classes = 2
-    model = _get_small_mlp(num_hidden, num_classes)
+    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
     model.compile(
         loss='mse',
         optimizer=rmsprop.RMSPropOptimizer(1e-3),
@@ -284,21 +275,21 @@ class TestSequential(test.TestCase, parameterized.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_sequential_shape_inference_deferred(self):
-    model = _get_small_mlp(4, 5)
+    model = testing_utils.get_small_sequential_mlp(4, 5)
     output_shape = model.compute_output_shape((None, 7))
     self.assertEqual(tuple(output_shape.as_list()), (None, 5))

   @tf_test_util.run_in_graph_and_eager_modes
   def test_sequential_build_deferred(self):
-    model = _get_small_mlp(4, 5)
+    model = testing_utils.get_small_sequential_mlp(4, 5)
     model.build((None, 10))
     self.assertTrue(model.built)
     self.assertEqual(len(model.weights), 4)

     # Test with nested model
-    model = _get_small_mlp(4, 3)
-    inner_model = _get_small_mlp(4, 5)
+    model = testing_utils.get_small_sequential_mlp(4, 3)
+    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
     model.add(inner_model)

     model.build((None, 10))
@@ -308,8 +299,8 @@ class TestSequential(test.TestCase, parameterized.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_sequential_nesting(self):
-    model = _get_small_mlp(4, 3)
-    inner_model = _get_small_mlp(4, 5)
+    model = testing_utils.get_small_sequential_mlp(4, 3)
+    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
     model.add(inner_model)

     model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
@@ -353,7 +344,7 @@ class TestSequentialEagerIntegration(test.TestCase):
   @tf_test_util.run_in_graph_and_eager_modes
   def test_build_before_fit(self):
     # Fix for b/112433577
-    model = _get_small_mlp(4, 5)
+    model = testing_utils.get_small_sequential_mlp(4, 5)
     model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))

     model.build((None, 6))
diff --git a/tensorflow/python/keras/engine/training_test.py b/tensorflow/python/keras/engine/training_test.py
index 5e5135b179..8d835ed5a9 100644
--- a/tensorflow/python/keras/engine/training_test.py
+++ b/tensorflow/python/keras/engine/training_test.py
@@ -49,289 +49,287 @@ class TrainingTest(test.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_fit_on_arrays(self):
-    with self.test_session():
-      a = keras.layers.Input(shape=(3,), name='input_a')
-      b = keras.layers.Input(shape=(3,), name='input_b')
-
-      dense = keras.layers.Dense(4, name='dense')
-      c = dense(a)
-      d = dense(b)
-      e = keras.layers.Dropout(0.5, name='dropout')(c)
-
-      model = keras.models.Model([a, b], [d, e])
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      loss_weights = [1., 0.5]
-      model.compile(
-          optimizer,
-          loss,
-          metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
-          loss_weights=loss_weights)
-
-      input_a_np = np.random.random((10, 3))
-      input_b_np = np.random.random((10, 3))
-
-      output_d_np = np.random.random((10, 4))
-      output_e_np = np.random.random((10, 4))
-
-      # Test fit at different verbosity
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          epochs=1,
-          batch_size=5,
-          verbose=0)
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          epochs=1,
-          batch_size=5,
-          verbose=1)
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          epochs=2,
-          batch_size=5,
-          verbose=2)
-      model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
-
-      # Test model with input data as a list of lists
-      model.fit(
-          [np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np)],
-          [output_d_np, output_e_np],
-          epochs=2,
-          batch_size=5,
-          verbose=2)
+    a = keras.layers.Input(shape=(3,), name='input_a')
+    b = keras.layers.Input(shape=(3,), name='input_b')

-      # Test with validation data
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          validation_data=([input_a_np, input_b_np], [output_d_np,
-                                                      output_e_np]),
-          epochs=1,
-          batch_size=5,
-          verbose=0)
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          validation_data=([input_a_np, input_b_np], [output_d_np,
-                                                      output_e_np]),
-          epochs=2,
-          batch_size=5,
-          verbose=1)
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          validation_data=([input_a_np, input_b_np], [output_d_np,
-                                                      output_e_np]),
-          epochs=2,
-          batch_size=5,
-          verbose=2)
-      # Test with validation split
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          epochs=2,
-          batch_size=5,
-          verbose=0,
-          validation_split=0.2)
+    dense = keras.layers.Dense(4, name='dense')
+    c = dense(a)
+    d = dense(b)
+    e = keras.layers.Dropout(0.5, name='dropout')(c)

-      # Test with dictionary inputs
-      model.fit(
-          {
-              'input_a': input_a_np,
-              'input_b': input_b_np
-          }, {
-              'dense': output_d_np,
-              'dropout': output_e_np
-          },
-          epochs=1,
-          batch_size=5,
-          verbose=0)
-      model.fit(
-          {
-              'input_a': input_a_np,
-              'input_b': input_b_np
-          }, {
-              'dense': output_d_np,
-              'dropout': output_e_np
-          },
-          epochs=1,
-          batch_size=5,
-          verbose=1)
-      model.fit(
-          {
-              'input_a': input_a_np,
-              'input_b': input_b_np
-          }, {
-              'dense': output_d_np,
-              'dropout': output_e_np
-          },
-          validation_data=({
-              'input_a': input_a_np,
-              'input_b': input_b_np
-          }, {
-              'dense': output_d_np,
-              'dropout': output_e_np
-          }),
-          epochs=1,
-          batch_size=5,
-          verbose=0)
-      model.train_on_batch({
-          'input_a': input_a_np,
-          'input_b': input_b_np
-      }, {
-          'dense': output_d_np,
-          'dropout': output_e_np
-      })
-
-      # Test with lists for loss, metrics
-      loss = ['mae', 'mse']
-      model.compile(
-          optimizer,
-          loss,
-          metrics=[metrics_module.CategoricalAccuracy(), 'mae'])
-      model.fit(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          epochs=1,
-          batch_size=5,
-          verbose=0)
+    model = keras.models.Model([a, b], [d, e])

-      # Test with dictionaries for loss, metrics, loss weights
-      loss = {'dense': 'mse', 'dropout': 'mae'}
-      loss_weights = {'dense': 1., 'dropout': 0.5}
-      metrics = {
-          'dense': 'mse',
-          'dropout': metrics_module.CategoricalAccuracy()
-      }
-      model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    loss_weights = [1., 0.5]
+    model.compile(
+        optimizer,
+        loss,
+        metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
+        loss_weights=loss_weights)
+
+    input_a_np = np.random.random((10, 3))
+    input_b_np = np.random.random((10, 3))
+
+    output_d_np = np.random.random((10, 4))
+    output_e_np = np.random.random((10, 4))
+
+    # Test fit at different verbosity
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        epochs=1,
+        batch_size=5,
+        verbose=0)
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        epochs=1,
+        batch_size=5,
+        verbose=1)
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        epochs=2,
+        batch_size=5,
+        verbose=2)
+    model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
+
+    # Test model with input data as a list of lists
+    model.fit(
+        [np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np)],
+        [output_d_np, output_e_np],
+        epochs=2,
+        batch_size=5,
+        verbose=2)
+
+    # Test with validation data
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        validation_data=([input_a_np, input_b_np], [output_d_np,
+                                                    output_e_np]),
+        epochs=1,
+        batch_size=5,
+        verbose=0)
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        validation_data=([input_a_np, input_b_np], [output_d_np,
+                                                    output_e_np]),
+        epochs=2,
+        batch_size=5,
+        verbose=1)
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        validation_data=([input_a_np, input_b_np], [output_d_np,
+                                                    output_e_np]),
+        epochs=2,
+        batch_size=5,
+        verbose=2)
+    # Test with validation split
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        epochs=2,
+        batch_size=5,
+        verbose=0,
+        validation_split=0.2)
+
+    # Test with dictionary inputs
+    model.fit(
+        {
+            'input_a': input_a_np,
+            'input_b': input_b_np
+        }, {
+            'dense': output_d_np,
+            'dropout': output_e_np
+        },
+        epochs=1,
+        batch_size=5,
+        verbose=0)
+    model.fit(
+        {
+            'input_a': input_a_np,
+            'input_b': input_b_np
+        }, {
+            'dense': output_d_np,
+            'dropout': output_e_np
+        },
+        epochs=1,
+        batch_size=5,
+        verbose=1)
+    model.fit(
+        {
+            'input_a': input_a_np,
+            'input_b': input_b_np
+        }, {
+            'dense': output_d_np,
+            'dropout': output_e_np
+        },
+        validation_data=({
+            'input_a': input_a_np,
+            'input_b': input_b_np
+        }, {
+            'dense': output_d_np,
+            'dropout': output_e_np
+        }),
+        epochs=1,
+        batch_size=5,
+        verbose=0)
+    model.train_on_batch({
+        'input_a': input_a_np,
+        'input_b': input_b_np
+    }, {
+        'dense': output_d_np,
+        'dropout': output_e_np
+    })
+
+    # Test with lists for loss, metrics
+    loss = ['mae', 'mse']
+    model.compile(
+        optimizer,
+        loss,
+        metrics=[metrics_module.CategoricalAccuracy(), 'mae'])
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        epochs=1,
+        batch_size=5,
+        verbose=0)
+
+    # Test with dictionaries for loss, metrics, loss weights
+    loss = {'dense': 'mse', 'dropout': 'mae'}
+    loss_weights = {'dense': 1., 'dropout': 0.5}
+    metrics = {
+        'dense': 'mse',
+        'dropout': metrics_module.CategoricalAccuracy()
+    }
+    model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
+    model.fit(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        epochs=1,
+        batch_size=5,
+        verbose=0)
+
+    # Invalid use cases
+    with self.assertRaises(ValueError):
+      model.train_on_batch({'input_a': input_a_np},
+                           [output_d_np, output_e_np])
+    with self.assertRaises(AttributeError):
       model.fit(
           [input_a_np, input_b_np], [output_d_np, output_e_np],
           epochs=1,
-          batch_size=5,
+          validation_data=([input_a_np, input_b_np], 0, 0),
           verbose=0)
+    with self.assertRaises(ValueError):
+      model.train_on_batch([input_a_np], [output_d_np, output_e_np])
+    with self.assertRaises(AttributeError):
+      model.train_on_batch(1, [output_d_np, output_e_np])
+    with self.assertRaises(ValueError):
+      model.train_on_batch(input_a_np, [output_d_np, output_e_np])
+    with self.assertRaises(ValueError):
+      bad_input = np.random.random((11, 3))
+      model.train_on_batch([bad_input, input_b_np],
+                           [output_d_np, output_e_np])
+    with self.assertRaises(ValueError):
+      bad_target = np.random.random((11, 4))
+      model.train_on_batch([input_a_np, input_b_np],
+                           [bad_target, output_e_np])
+
+    # Build single-input model
+    x = keras.layers.Input(shape=(3,), name='input_a')
+    y = keras.layers.Dense(4)(x)
+    model = keras.models.Model(x, y)
+    model.compile(optimizer, loss='mse')
+    # This will work
+    model.fit([input_a_np], output_d_np, epochs=1)
+    with self.assertRaises(ValueError):
+      model.fit([input_a_np, input_a_np], output_d_np, epochs=1)

-      # Invalid use cases
-      with self.assertRaises(ValueError):
-        model.train_on_batch({'input_a': input_a_np},
-                             [output_d_np, output_e_np])
-      with self.assertRaises(AttributeError):
-        model.fit(
-            [input_a_np, input_b_np], [output_d_np, output_e_np],
-            epochs=1,
-            validation_data=([input_a_np, input_b_np], 0, 0),
-            verbose=0)
-      with self.assertRaises(ValueError):
-        model.train_on_batch([input_a_np], [output_d_np, output_e_np])
-      with self.assertRaises(AttributeError):
-        model.train_on_batch(1, [output_d_np, output_e_np])
-      with self.assertRaises(ValueError):
-        model.train_on_batch(input_a_np, [output_d_np, output_e_np])
-      with self.assertRaises(ValueError):
-        bad_input = np.random.random((11, 3))
-        model.train_on_batch([bad_input, input_b_np],
-                             [output_d_np, output_e_np])
-      with self.assertRaises(ValueError):
-        bad_target = np.random.random((11, 4))
-        model.train_on_batch([input_a_np, input_b_np],
-                             [bad_target, output_e_np])
-
-      # Build single-input model
-      x = keras.layers.Input(shape=(3,), name='input_a')
-      y = keras.layers.Dense(4)(x)
-      model = keras.models.Model(x, y)
-      model.compile(optimizer, loss='mse')
-      # This will work
-      model.fit([input_a_np], output_d_np, epochs=1)
-      with self.assertRaises(ValueError):
-        model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
-
-      # Test model on a list of floats
-      input_a_np = np.random.random((10, 3))
-      input_b_np = np.random.random((10, 4))
+    # Test model on a list of floats
+    input_a_np = np.random.random((10, 3))
+    input_b_np = np.random.random((10, 4))

-      model.fit([np.ndarray.tolist(input_a_np)],
-                [np.ndarray.tolist(input_b_np)],
-                epochs=2,
-                batch_size=5,
-                verbose=2)
+    model.fit([np.ndarray.tolist(input_a_np)],
+              [np.ndarray.tolist(input_b_np)],
+              epochs=2,
+              batch_size=5,
+              verbose=2)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_evaluate_predict_on_arrays(self):
-    with self.test_session():
-      a = keras.layers.Input(shape=(3,), name='input_a')
-      b = keras.layers.Input(shape=(3,), name='input_b')
-
-      dense = keras.layers.Dense(4, name='dense')
-      c = dense(a)
-      d = dense(b)
-      e = keras.layers.Dropout(0.5, name='dropout')(c)
-
-      model = keras.models.Model([a, b], [d, e])
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      loss_weights = [1., 0.5]
-      model.compile(
-          optimizer,
-          loss,
-          metrics=['mae', metrics_module.CategoricalAccuracy()],
-          loss_weights=loss_weights,
-          sample_weight_mode=None)
+    a = keras.layers.Input(shape=(3,), name='input_a')
+    b = keras.layers.Input(shape=(3,), name='input_b')

-      input_a_np = np.random.random((10, 3))
-      input_b_np = np.random.random((10, 3))
+    dense = keras.layers.Dense(4, name='dense')
+    c = dense(a)
+    d = dense(b)
+    e = keras.layers.Dropout(0.5, name='dropout')(c)

-      output_d_np = np.random.random((10, 4))
-      output_e_np = np.random.random((10, 4))
+    model = keras.models.Model([a, b], [d, e])

-      # Test evaluate at different verbosity
-      out = model.evaluate(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          batch_size=5,
-          verbose=0)
-      self.assertEqual(len(out), 7)
-      out = model.evaluate(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          batch_size=5,
-          verbose=1)
-      self.assertEqual(len(out), 7)
-      out = model.evaluate(
-          [input_a_np, input_b_np], [output_d_np, output_e_np],
-          batch_size=5,
-          verbose=2)
-      self.assertEqual(len(out), 7)
-      out = model.test_on_batch([input_a_np, input_b_np],
-                                [output_d_np, output_e_np])
-      self.assertEqual(len(out), 7)
-
-      # Test evaluate with dictionary inputs
-      model.evaluate(
-          {
-              'input_a': input_a_np,
-              'input_b': input_b_np
-          }, {
-              'dense': output_d_np,
-              'dropout': output_e_np
-          },
-          batch_size=5,
-          verbose=0)
-      model.evaluate(
-          {
-              'input_a': input_a_np,
-              'input_b': input_b_np
-          }, {
-              'dense': output_d_np,
-              'dropout': output_e_np
-          },
-          batch_size=5,
-          verbose=1)
-
-      # Test predict
-      out = model.predict([input_a_np, input_b_np], batch_size=5)
-      self.assertEqual(len(out), 2)
-      out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
-      self.assertEqual(len(out), 2)
-      out = model.predict_on_batch({
-          'input_a': input_a_np,
-          'input_b': input_b_np
-      })
-      self.assertEqual(len(out), 2)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    loss_weights = [1., 0.5]
+    model.compile(
+        optimizer,
+        loss,
+        metrics=['mae', metrics_module.CategoricalAccuracy()],
+        loss_weights=loss_weights,
+        sample_weight_mode=None)
+
+    input_a_np = np.random.random((10, 3))
+    input_b_np = np.random.random((10, 3))
+
+    output_d_np = np.random.random((10, 4))
+    output_e_np = np.random.random((10, 4))
+
+    # Test evaluate at different verbosity
+    out = model.evaluate(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        batch_size=5,
+        verbose=0)
+    self.assertEqual(len(out), 7)
+    out = model.evaluate(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        batch_size=5,
+        verbose=1)
+    self.assertEqual(len(out), 7)
+    out = model.evaluate(
+        [input_a_np, input_b_np], [output_d_np, output_e_np],
+        batch_size=5,
+        verbose=2)
+    self.assertEqual(len(out), 7)
+    out = model.test_on_batch([input_a_np, input_b_np],
+                              [output_d_np, output_e_np])
+    self.assertEqual(len(out), 7)
+
+    # Test evaluate with dictionary inputs
+    model.evaluate(
+        {
+            'input_a': input_a_np,
+            'input_b': input_b_np
+        }, {
+            'dense': output_d_np,
+            'dropout': output_e_np
+        },
+        batch_size=5,
+        verbose=0)
+    model.evaluate(
+        {
+            'input_a': input_a_np,
+            'input_b': input_b_np
+        }, {
+            'dense': output_d_np,
+            'dropout': output_e_np
+        },
+        batch_size=5,
+        verbose=1)
+
+    # Test predict
+    out = model.predict([input_a_np, input_b_np], batch_size=5)
+    self.assertEqual(len(out), 2)
+    out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
+    self.assertEqual(len(out), 2)
+    out = model.predict_on_batch({
+        'input_a': input_a_np,
+        'input_b': input_b_np
+    })
+    self.assertEqual(len(out), 2)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_invalid_loss(self):
@@ -340,31 +338,27 @@ class TrainingTest(test.TestCase):
     test_samples = 1000
     input_dim = 5

-    with self.test_session():
-      model = keras.models.Sequential()
-      model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
-      model.add(keras.layers.Activation('relu'))
-      model.add(keras.layers.Dense(num_classes))
-      model.add(keras.layers.Activation('softmax'))
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      model.compile(optimizer, loss='categorical_crossentropy')
-      np.random.seed(1337)
-      (x_train, y_train), (_, _) = testing_utils.get_test_data(
-          train_samples=train_samples,
-          test_samples=test_samples,
-          input_shape=(input_dim,),
-          num_classes=num_classes)
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=10, num_classes=num_classes, input_dim=input_dim)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    model.compile(optimizer, loss='categorical_crossentropy')
+    np.random.seed(1337)
+    (x_train, y_train), (_, _) = testing_utils.get_test_data(
+        train_samples=train_samples,
+        test_samples=test_samples,
+        input_shape=(input_dim,),
+        num_classes=num_classes)

-      with self.assertRaises(ValueError):
-        model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
+    with self.assertRaises(ValueError):
+      model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))

-      if not context.executing_eagerly():
-        # TODO(psv): Investigate these use cases in eager mode.
-        with self.assertRaises(ValueError):
-          model.fit(x_train, y_train)
+    if not context.executing_eagerly():
+      # TODO(psv): Investigate these use cases in eager mode.
+      with self.assertRaises(ValueError):
+        model.fit(x_train, y_train)

-        with self.assertRaises(ValueError):
-          model.compile(optimizer, loss=None)
+      with self.assertRaises(ValueError):
+        model.compile(optimizer, loss=None)

   def test_training_on_sparse_data_with_dense_placeholders(self):
     if scipy_sparse is None:
@@ -468,67 +462,63 @@ class LossWeightingTest(test.TestCase):
     input_dim = 5
     learning_rate = 0.001

-    with self.test_session():
-      model = keras.models.Sequential()
-      model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
-      model.add(keras.layers.Activation('relu'))
-      model.add(keras.layers.Dense(num_classes))
-      model.add(keras.layers.Activation('softmax'))
-      model.compile(
-          loss='categorical_crossentropy',
-          metrics=['acc'],
-          weighted_metrics=['mae'],
-          optimizer=RMSPropOptimizer(learning_rate=learning_rate))
-
-      np.random.seed(1337)
-      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-          train_samples=train_samples,
-          test_samples=test_samples,
-          input_shape=(input_dim,),
-          num_classes=num_classes)
-      int_y_test = y_test.copy()
-      int_y_train = y_train.copy()
-      # convert class vectors to binary class matrices
-      y_train = keras.utils.to_categorical(y_train, num_classes)
-      y_test = keras.utils.to_categorical(y_test, num_classes)
-      test_ids = np.where(int_y_test == np.array(weighted_class))[0]
-
-      class_weight = dict([(i, 1.) for i in range(num_classes)])
-      class_weight[weighted_class] = 2.
-
-      sample_weight = np.ones((y_train.shape[0]))
-      sample_weight[int_y_train == weighted_class] = 2.
-
-      model.fit(
-          x_train,
-          y_train,
-          batch_size=batch_size,
-          epochs=epochs // 3,
-          verbose=0,
-          class_weight=class_weight,
-          validation_data=(x_train, y_train, sample_weight))
-      model.fit(
-          x_train,
-          y_train,
-          batch_size=batch_size,
-          epochs=epochs // 2,
-          verbose=0,
-          class_weight=class_weight)
-      model.fit(
-          x_train,
-          y_train,
-          batch_size=batch_size,
-          epochs=epochs // 2,
-          verbose=0,
-          class_weight=class_weight,
-          validation_split=0.1)
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=10, num_classes=num_classes, input_dim=input_dim)
+    model.compile(
+        loss='categorical_crossentropy',
+        metrics=['acc'],
+        weighted_metrics=['mae'],
+        optimizer=RMSPropOptimizer(learning_rate=learning_rate))
+
+    np.random.seed(1337)
+    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
+        train_samples=train_samples,
+        test_samples=test_samples,
+        input_shape=(input_dim,),
+        num_classes=num_classes)
+    int_y_test = y_test.copy()
+    int_y_train = y_train.copy()
+    # convert class vectors to binary class matrices
+    y_train = keras.utils.to_categorical(y_train, num_classes)
+    y_test = keras.utils.to_categorical(y_test, num_classes)
+    test_ids = np.where(int_y_test == np.array(weighted_class))[0]
+
+    class_weight = dict([(i, 1.) for i in range(num_classes)])
+    class_weight[weighted_class] = 2.

-      model.train_on_batch(
-          x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
-      ref_score = model.evaluate(x_test, y_test, verbose=0)
-      score = model.evaluate(
-          x_test[test_ids, :], y_test[test_ids, :], verbose=0)
-      self.assertLess(score[0], ref_score[0])
+    sample_weight = np.ones((y_train.shape[0]))
+    sample_weight[int_y_train == weighted_class] = 2.
+
+    model.fit(
+        x_train,
+        y_train,
+        batch_size=batch_size,
+        epochs=epochs // 3,
+        verbose=0,
+        class_weight=class_weight,
+        validation_data=(x_train, y_train, sample_weight))
+    model.fit(
+        x_train,
+        y_train,
+        batch_size=batch_size,
+        epochs=epochs // 2,
+        verbose=0,
+        class_weight=class_weight)
+    model.fit(
+        x_train,
+        y_train,
+        batch_size=batch_size,
+        epochs=epochs // 2,
+        verbose=0,
+        class_weight=class_weight,
+        validation_split=0.1)
+
+    model.train_on_batch(
+        x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
+    ref_score = model.evaluate(x_test, y_test, verbose=0)
+    score = model.evaluate(
+        x_test[test_ids, :], y_test[test_ids, :], verbose=0)
+    self.assertLess(score[0], ref_score[0])

   @tf_test_util.run_in_graph_and_eager_modes
   def test_sample_weights(self):
@@ -541,63 +531,59 @@ class LossWeightingTest(test.TestCase):
     input_dim = 5
     learning_rate = 0.001

-    with self.test_session():
-      model = keras.models.Sequential()
-      model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
-      model.add(keras.layers.Activation('relu'))
-      model.add(keras.layers.Dense(num_classes))
-      model.add(keras.layers.Activation('softmax'))
-      model.compile(
-          RMSPropOptimizer(learning_rate=learning_rate),
-          metrics=['acc'],
-          weighted_metrics=['mae'],
-          loss='categorical_crossentropy')
-
-      np.random.seed(43)
-      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-          train_samples=train_samples,
-          test_samples=test_samples,
-          input_shape=(input_dim,),
-          num_classes=num_classes)
-      int_y_test = y_test.copy()
-      int_y_train = y_train.copy()
-      # convert class vectors to binary class matrices
-      y_train = keras.utils.to_categorical(y_train, num_classes)
-      y_test = keras.utils.to_categorical(y_test, num_classes)
-      test_ids = np.where(int_y_test == np.array(weighted_class))[0]
-
-      sample_weight = np.ones((y_train.shape[0]))
-      sample_weight[int_y_train == weighted_class] = 2.
-
-      model.fit(
-          x_train,
-          y_train,
-          batch_size=batch_size,
-          epochs=epochs // 3,
-          verbose=0,
-          sample_weight=sample_weight)
-      model.fit(
-          x_train,
-          y_train,
-          batch_size=batch_size,
-          epochs=epochs // 3,
-          verbose=0,
-          sample_weight=sample_weight,
-          validation_split=0.1)
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=10, num_classes=num_classes, input_dim=input_dim)
+    model.compile(
+        RMSPropOptimizer(learning_rate=learning_rate),
+        metrics=['acc'],
+        weighted_metrics=['mae'],
+        loss='categorical_crossentropy')
+
+    np.random.seed(43)
+    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
+        train_samples=train_samples,
+        test_samples=test_samples,
+        input_shape=(input_dim,),
+        num_classes=num_classes)
+    int_y_test = y_test.copy()
+    int_y_train = y_train.copy()
+    # convert class vectors to binary class matrices
+    y_train = keras.utils.to_categorical(y_train, num_classes)
+    y_test = keras.utils.to_categorical(y_test, num_classes)
+    test_ids = np.where(int_y_test == np.array(weighted_class))[0]

-      model.train_on_batch(
-          x_train[:batch_size],
-          y_train[:batch_size],
-          sample_weight=sample_weight[:batch_size])
-      model.test_on_batch(
-          x_train[:batch_size],
-          y_train[:batch_size],
-          sample_weight=sample_weight[:batch_size])
-      ref_score = model.evaluate(x_test, y_test, verbose=0)
-      if not context.executing_eagerly():
-        score = model.evaluate(
-            x_test[test_ids, :], y_test[test_ids, :], verbose=0)
-        self.assertLess(score[0], ref_score[0])
+    sample_weight = np.ones((y_train.shape[0]))
+    sample_weight[int_y_train == weighted_class] = 2.
+
+    model.fit(
+        x_train,
+        y_train,
+        batch_size=batch_size,
+        epochs=epochs // 3,
+        verbose=0,
+        sample_weight=sample_weight)
+    model.fit(
+        x_train,
+        y_train,
+        batch_size=batch_size,
+        epochs=epochs // 3,
+        verbose=0,
+        sample_weight=sample_weight,
+        validation_split=0.1)
+
+    model.train_on_batch(
+        x_train[:batch_size],
+        y_train[:batch_size],
+        sample_weight=sample_weight[:batch_size])
+    model.test_on_batch(
+        x_train[:batch_size],
+        y_train[:batch_size],
+        sample_weight=sample_weight[:batch_size])
+    ref_score = model.evaluate(x_test, y_test, verbose=0)
+    if not context.executing_eagerly():
+      score = model.evaluate(
+          x_test[test_ids, :], y_test[test_ids, :], verbose=0)
+      self.assertLess(score[0], ref_score[0])

   @tf_test_util.run_in_graph_and_eager_modes
   def test_warning_for_concurrent_sample_and_class_weights(self):
@@ -1909,223 +1895,198 @@ class TestTrainingWithDatasetIterators(test.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_training_and_eval_methods_on_iterators_single_io(self):
-    with self.test_session():
-      x = keras.layers.Input(shape=(3,), name='input')
-      y = keras.layers.Dense(4, name='dense')(x)
-      model = keras.Model(x, y)
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      metrics = ['mae', metrics_module.CategoricalAccuracy()]
-      model.compile(optimizer, loss, metrics=metrics)
-
-      inputs = np.zeros((10, 3))
-      targets = np.zeros((10, 4))
-      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
-      dataset = dataset.repeat(100)
-      dataset = dataset.batch(10)
-      iterator = dataset.make_one_shot_iterator()
-
-      model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
-      model.evaluate(iterator, steps=2, verbose=1)
-      model.predict(iterator, steps=2)
-      model.train_on_batch(iterator)
-      model.test_on_batch(iterator)
-      model.predict_on_batch(iterator)
-
-      # Test with validation data
+    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    metrics = ['mae', metrics_module.CategoricalAccuracy()]
+    model.compile(optimizer, loss, metrics=metrics)
+
+    inputs = np.zeros((10, 3))
+    targets = np.zeros((10, 4))
+    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
+    dataset = dataset.repeat(100)
+    dataset = dataset.batch(10)
+    iterator = dataset.make_one_shot_iterator()
+
+    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
+    model.evaluate(iterator, steps=2, verbose=1)
+    model.predict(iterator, steps=2)
+    model.train_on_batch(iterator)
+    model.test_on_batch(iterator)
+    model.predict_on_batch(iterator)
+
+    # Test with validation data
+    model.fit(iterator,
+              epochs=1, steps_per_epoch=2, verbose=0,
+              validation_data=iterator, validation_steps=2)
+    # Test with validation split
+    with self.assertRaisesRegexp(
+        ValueError, '`validation_split` argument is not supported '
+        'when input `x` is a dataset or a dataset iterator'):
       model.fit(iterator,
                 epochs=1, steps_per_epoch=2, verbose=0,
-                validation_data=iterator, validation_steps=2)
-      # Test with validation split
-      with self.assertRaisesRegexp(
-          ValueError, '`validation_split` argument is not supported '
-          'when input `x` is a dataset or a dataset iterator'):
-        model.fit(iterator,
-                  epochs=1, steps_per_epoch=2, verbose=0,
-                  validation_split=0.5, validation_steps=2)
-
-      # Test with sample weight.
-      sample_weight = np.random.random((10,))
-      with self.assertRaisesRegexp(
-          ValueError, '`sample_weight` argument is not supported '
-          'when input `x` is a dataset or a dataset iterator'):
-        model.fit(
-            iterator,
-            epochs=1,
-            steps_per_epoch=2,
-            verbose=0,
-            sample_weight=sample_weight)
+                validation_split=0.5, validation_steps=2)

-      # Test invalid usage
-      with self.assertRaisesRegexp(ValueError,
-                                   'you should not specify a target'):
-        model.fit(iterator, iterator,
-                  epochs=1, steps_per_epoch=2, verbose=0)
+    # Test with sample weight.
+    sample_weight = np.random.random((10,))
+    with self.assertRaisesRegexp(
+        ValueError, '`sample_weight` argument is not supported '
+        'when input `x` is a dataset or a dataset iterator'):
+      model.fit(
+          iterator,
+          epochs=1,
+          steps_per_epoch=2,
+          verbose=0,
+          sample_weight=sample_weight)

-      with self.assertRaisesRegexp(
-          ValueError, 'you should specify the `steps_per_epoch` argument'):
-        model.fit(iterator, epochs=1, verbose=0)
-      with self.assertRaisesRegexp(ValueError,
-                                   'you should specify the `steps` argument'):
-        model.evaluate(iterator, verbose=0)
-      with self.assertRaisesRegexp(ValueError,
-                                   'you should specify the `steps` argument'):
-        model.predict(iterator, verbose=0)
+    # Test invalid usage
+    with self.assertRaisesRegexp(ValueError,
+                                 'you should not specify a target'):
+      model.fit(iterator, iterator,
+                epochs=1, steps_per_epoch=2, verbose=0)
+
+    with self.assertRaisesRegexp(
+        ValueError, 'you should specify the `steps_per_epoch` argument'):
+      model.fit(iterator, epochs=1, verbose=0)
+    with self.assertRaisesRegexp(ValueError,
+                                 'you should specify the `steps` argument'):
+      model.evaluate(iterator, verbose=0)
+    with self.assertRaisesRegexp(ValueError,
+                                 'you should specify the `steps` argument'):
+      model.predict(iterator, verbose=0)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_get_next_op_created_once(self):
-    with self.test_session():
-      x = keras.layers.Input(shape=(3,), name='input')
-      y = keras.layers.Dense(4, name='dense')(x)
-      model = keras.Model(x, y)
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      metrics = ['mae']
-      model.compile(optimizer, loss, metrics=metrics)
-
-      inputs = np.zeros((10, 3))
-      targets = np.zeros((10, 4))
-      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
-      dataset = dataset.repeat(100)
-      dataset = dataset.batch(10)
-      iterator = dataset.make_one_shot_iterator()
-
-      model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
-      # Finalize graph to make sure we are not appending another iterator
-      # get_next op in the graph.
-      ops.get_default_graph().finalize()
-      model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
+    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    metrics = ['mae']
+    model.compile(optimizer, loss, metrics=metrics)
+
+    inputs = np.zeros((10, 3))
+    targets = np.zeros((10, 4))
+    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
+    dataset = dataset.repeat(100)
+    dataset = dataset.batch(10)
+    iterator = dataset.make_one_shot_iterator()
+
+    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
+    # Finalize graph to make sure we are not appending another iterator
+    # get_next op in the graph.
+    ops.get_default_graph().finalize()
+    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_iterators_running_out_of_data(self):
-    with self.test_session():
-      x = keras.layers.Input(shape=(3,), name='input')
-      y = keras.layers.Dense(4, name='dense')(x)
-      model = keras.Model(x, y)
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      metrics = ['mae']
-      model.compile(optimizer, loss, metrics=metrics)
+    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    metrics = ['mae']
+    model.compile(optimizer, loss, metrics=metrics)

-      inputs = np.zeros((10, 3))
-      targets = np.zeros((10, 4))
-      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
-      dataset = dataset.repeat(2)
-      dataset = dataset.batch(10)
-      iterator = dataset.make_one_shot_iterator()
+    inputs = np.zeros((10, 3))
+    targets = np.zeros((10, 4))
+    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
+    dataset = dataset.repeat(2)
+    dataset = dataset.batch(10)
+    iterator = dataset.make_one_shot_iterator()

-      with test.mock.patch.object(logging, 'warning') as mock_log:
-        model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
-        self.assertRegexpMatches(
-            str(mock_log.call_args),
-            'dataset iterator ran out of data')
+    with test.mock.patch.object(logging, 'warning') as mock_log:
+      model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
+      self.assertRegexpMatches(
+          str(mock_log.call_args),
+          'dataset iterator ran out of data')


 class TestTrainingWithDataset(test.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_calling_model_on_same_dataset(self):
-    with self.test_session():
-      x = keras.layers.Input(shape=(3,), name='input')
-      y = keras.layers.Dense(4, name='dense')(x)
-      model = keras.Model(x, y)
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      metrics = ['mae']
-      model.compile(optimizer, loss, metrics=metrics)
-
-      inputs = np.zeros((10, 3))
-      targets = np.zeros((10, 4))
-      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
-      dataset = dataset.repeat(100)
-      dataset = dataset.batch(10)
-
-      # Call fit with validation data
-      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
-                validation_data=dataset, validation_steps=2)
-      # Finalize the graph to make sure new ops aren't added when calling on the
-      # same dataset
-      ops.get_default_graph().finalize()
-      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
-                validation_data=dataset, validation_steps=2)
+    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    metrics = ['mae']
+    model.compile(optimizer, loss, metrics=metrics)
+
+    inputs = np.zeros((10, 3))
+    targets = np.zeros((10, 4))
+    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
+    dataset = dataset.repeat(100)
+    dataset = dataset.batch(10)
+
+    # Call fit with validation data
+    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
+              validation_data=dataset, validation_steps=2)
+    # Finalize the graph to make sure new ops aren't added when calling on the
+    # same dataset
+    ops.get_default_graph().finalize()
+    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
+              validation_data=dataset, validation_steps=2)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_training_and_eval_methods_on_dataset(self):
-    with self.test_session():
-      x = keras.layers.Input(shape=(3,), name='input')
-      y = keras.layers.Dense(4, name='dense')(x)
-      model = keras.Model(x, y)
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      metrics = ['mae', metrics_module.CategoricalAccuracy()]
-      model.compile(optimizer, loss, metrics=metrics)
-
-      inputs = np.zeros((10, 3))
-      targets = np.zeros((10, 4))
-      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
-      dataset = dataset.repeat(100)
-      dataset = dataset.batch(10)
-
-      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
-      model.evaluate(dataset, steps=2, verbose=1)
-      model.predict(dataset, steps=2)
-      model.train_on_batch(dataset)
-      model.predict_on_batch(dataset)
-
-      # Test with validation data
-      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
-                validation_data=dataset, validation_steps=2)
-
-      # Test with validation split
-      with self.assertRaisesRegexp(
-          ValueError, '`validation_split` argument is not supported '
-          'when input `x` is a dataset or a dataset iterator'):
-        model.fit(dataset,
-                  epochs=1, steps_per_epoch=2, verbose=0,
-                  validation_split=0.5, validation_steps=2)
-
-      # Test with sample weight.
-      sample_weight = np.random.random((10,))
-      with self.assertRaisesRegexp(
-          ValueError, '`sample_weight` argument is not supported '
-          'when input `x` is a dataset or a dataset iterator'):
-        model.fit(
-            dataset,
-            epochs=1,
-            steps_per_epoch=2,
-            verbose=0,
-            sample_weight=sample_weight)
+    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
+    loss = 'mse'
+    metrics = ['mae', metrics_module.CategoricalAccuracy()]
+    model.compile(optimizer, loss, metrics=metrics)
+
+    inputs = np.zeros((10, 3))
+    targets = np.zeros((10, 4))
+    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
+    dataset = dataset.repeat(100)
+    dataset = dataset.batch(10)
+
+    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
+    model.evaluate(dataset, steps=2, verbose=1)
+    model.predict(dataset, steps=2)
+    model.train_on_batch(dataset)
+    model.predict_on_batch(dataset)
+
+    # Test with validation data
+    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
+              validation_data=dataset, validation_steps=2)
+
+    # Test with validation split
+    with self.assertRaisesRegexp(
+        ValueError, '`validation_split` argument is not supported '
+        'when input `x` is a dataset or a dataset iterator'):
+      model.fit(dataset,
+                epochs=1, steps_per_epoch=2, verbose=0,
+                validation_split=0.5, validation_steps=2)

-      # Test invalid usage
-      with self.assertRaisesRegexp(ValueError,
-                                   'you should not specify a target'):
-        model.fit(dataset, dataset,
-                  epochs=1, steps_per_epoch=2, verbose=0)
+    # Test with sample weight.
+    sample_weight = np.random.random((10,))
+    with self.assertRaisesRegexp(
+        ValueError, '`sample_weight` argument is not supported '
+        'when input `x` is a dataset or a dataset iterator'):
+      model.fit(
+          dataset,
+          epochs=1,
+          steps_per_epoch=2,
+          verbose=0,
+          sample_weight=sample_weight)

-      with self.assertRaisesRegexp(
-          ValueError, 'you should specify the `steps_per_epoch` argument'):
-        model.fit(dataset, epochs=1, verbose=0)
-      with self.assertRaisesRegexp(ValueError,
-                                   'you should specify the `steps` argument'):
-        model.evaluate(dataset, verbose=0)
-      with self.assertRaisesRegexp(ValueError,
-                                   'you should specify the `steps` argument'):
-        model.predict(dataset, verbose=0)
+    # Test invalid usage
+    with self.assertRaisesRegexp(ValueError,
+                                 'you should not specify a target'):
+      model.fit(dataset, dataset,
+                epochs=1, steps_per_epoch=2, verbose=0)
+
+    with self.assertRaisesRegexp(
+        ValueError, 'you should specify the `steps_per_epoch` argument'):
+      model.fit(dataset, epochs=1, verbose=0)
+    with self.assertRaisesRegexp(ValueError,
+                                 'you should specify the `steps` argument'):
+      model.evaluate(dataset, verbose=0)
+    with self.assertRaisesRegexp(ValueError,
+                                 'you should specify the `steps` argument'):
+      model.predict(dataset, verbose=0)

   def test_dataset_input_shape_validation(self):
     with self.test_session():
-      x = keras.layers.Input(shape=(3,), name='input')
-      y = keras.layers.Dense(4, name='dense')(x)
-      model = keras.Model(x, y)
-
-      optimizer = RMSPropOptimizer(learning_rate=0.001)
-      loss = 'mse'
-      model.compile(optimizer, loss)
+      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
+      model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')

       # User forgets to batch the dataset
       inputs = np.zeros((10, 3))
@@ -2134,7 +2095,7 @@ class TestTrainingWithDataset(test.TestCase):
       dataset = dataset.repeat(100)

       with self.assertRaisesRegexp(ValueError,
-                                   'expected input to have 2 dimensions'):
+                                   r'expected (.*?) to have 2 dimensions'):
         model.train_on_batch(dataset)

       # Wrong input shape
@@ -2145,7 +2106,7 @@ class TestTrainingWithDataset(test.TestCase):
       dataset = dataset.batch(10)

       with self.assertRaisesRegexp(ValueError,
-                                   'expected input to have shape'):
+                                   r'expected (.*?) to have shape \(3,\)'):
         model.train_on_batch(dataset)

@@ -2176,134 +2137,127 @@ class TestTrainingWithMetrics(test.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_metrics_correctness(self):
-    with self.test_session():
-      model = keras.Sequential()
-      model.add(
-          keras.layers.Dense(
-              3, activation='relu', input_dim=4, kernel_initializer='ones'))
-      model.add(
-          keras.layers.Dense(
-              1, activation='sigmoid', kernel_initializer='ones'))
-      model.compile(
-          loss='mae',
-          metrics=['accuracy', metrics_module.BinaryAccuracy()],
-          optimizer=RMSPropOptimizer(learning_rate=0.001))
-
-      # verify correctness of stateful and stateless metrics.
-      x = np.ones((100, 4))
-      y = np.ones((100, 1))
-      outs = model.evaluate(x, y)
-      self.assertEqual(outs[1], 1.)
-      self.assertEqual(outs[2], 1.)
-
-      y = np.zeros((100, 1))
-      outs = model.evaluate(x, y)
-      self.assertEqual(outs[1], 0.)
-      self.assertEqual(outs[2], 0.)
+    model = keras.Sequential()
+    model.add(
+        keras.layers.Dense(
+            3, activation='relu', input_dim=4, kernel_initializer='ones'))
+    model.add(
+        keras.layers.Dense(
+            1, activation='sigmoid', kernel_initializer='ones'))
+    model.compile(
+        loss='mae',
+        metrics=['accuracy', metrics_module.BinaryAccuracy()],
+        optimizer=RMSPropOptimizer(learning_rate=0.001))
+
+    # verify correctness of stateful and stateless metrics.
+    x = np.ones((100, 4))
+    y = np.ones((100, 1))
+    outs = model.evaluate(x, y)
+    self.assertEqual(outs[1], 1.)
+    self.assertEqual(outs[2], 1.)
+
+    y = np.zeros((100, 1))
+    outs = model.evaluate(x, y)
+    self.assertEqual(outs[1], 0.)
+    self.assertEqual(outs[2], 0.)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_metrics_correctness_with_iterator(self):
-    with self.test_session():
-      model = keras.Sequential()
-      model.add(
-          keras.layers.Dense(
-              8, activation='relu', input_dim=4, kernel_initializer='ones'))
-      model.add(
-          keras.layers.Dense(
-              1, activation='sigmoid', kernel_initializer='ones'))
-      model.compile(
-          loss='binary_crossentropy',
-          metrics=['accuracy', metrics_module.BinaryAccuracy()],
-          optimizer=RMSPropOptimizer(learning_rate=0.001))
-
-      np.random.seed(123)
-      x = np.random.randint(10, size=(100, 4)).astype(np.float32)
-      y = np.random.randint(2, size=(100, 1)).astype(np.float32)
-      dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
-      dataset = dataset.batch(10)
-      iterator = dataset.make_one_shot_iterator()
-      outs = model.evaluate(iterator, steps=10)
-      self.assertEqual(np.around(outs[1], decimals=1), 0.5)
-      self.assertEqual(np.around(outs[2], decimals=1), 0.5)
-
-      y = np.zeros((100, 1), dtype=np.float32)
-      dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
-      dataset = dataset.repeat(100)
-      dataset = dataset.batch(10)
-      iterator = dataset.make_one_shot_iterator()
-      outs = model.evaluate(iterator, steps=10)
-      self.assertEqual(outs[1], 0.)
-      self.assertEqual(outs[2], 0.)
+    model = keras.Sequential()
+    model.add(
+        keras.layers.Dense(
+            8, activation='relu', input_dim=4, kernel_initializer='ones'))
+    model.add(
+        keras.layers.Dense(
+            1, activation='sigmoid', kernel_initializer='ones'))
+    model.compile(
+        loss='binary_crossentropy',
+        metrics=['accuracy', metrics_module.BinaryAccuracy()],
+        optimizer=RMSPropOptimizer(learning_rate=0.001))
+
+    np.random.seed(123)
+    x = np.random.randint(10, size=(100, 4)).astype(np.float32)
+    y = np.random.randint(2, size=(100, 1)).astype(np.float32)
+    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
+    dataset = dataset.batch(10)
+    iterator = dataset.make_one_shot_iterator()
+    outs = model.evaluate(iterator, steps=10)
+    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
+    self.assertEqual(np.around(outs[2], decimals=1), 0.5)
+
+    y = np.zeros((100, 1), dtype=np.float32)
+    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
+    dataset = dataset.repeat(100)
+    dataset = dataset.batch(10)
+    iterator = dataset.make_one_shot_iterator()
+    outs = model.evaluate(iterator, steps=10)
+    self.assertEqual(outs[1], 0.)
+    self.assertEqual(outs[2], 0.)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_metrics_correctness_with_weighted_metrics(self):
-    with self.test_session():
-      np.random.seed(1337)
-      x = np.array([[[1.], [1.]], [[0.], [0.]]])
-      model = keras.models.Sequential()
-      model.add(
-          keras.layers.TimeDistributed(
-              keras.layers.Dense(1, kernel_initializer='ones'),
-              input_shape=(2, 1)))
-      model.compile(
-          RMSPropOptimizer(learning_rate=0.001),
-          loss='mse',
-          sample_weight_mode='temporal',
-          weighted_metrics=['accuracy',
-                            metrics_module.BinaryAccuracy()])
-      y = np.array([[[1.], [1.]], [[1.], [1.]]])
+    np.random.seed(1337)
+    x = np.array([[[1.], [1.]], [[0.], [0.]]])
+    model = keras.models.Sequential()
+    model.add(
+        keras.layers.TimeDistributed(
+            keras.layers.Dense(1, kernel_initializer='ones'),
+            input_shape=(2, 1)))
+    model.compile(
+        RMSPropOptimizer(learning_rate=0.001),
+        loss='mse',
+        sample_weight_mode='temporal',
+        weighted_metrics=['accuracy',
+                          metrics_module.BinaryAccuracy()])
+    y = np.array([[[1.], [1.]], [[1.], [1.]]])

-      outs = model.evaluate(x, y)
-      self.assertEqual(outs, [0.5, 0.5, 0.5])
+    outs = model.evaluate(x, y)
+    self.assertEqual(outs, [0.5, 0.5, 0.5])

-      w = np.array([[0., 0.], [0., 0.]])
-      outs = model.evaluate(x, y, sample_weight=w)
-      self.assertEqual(outs, [0., 0., 0.])
+    w = np.array([[0., 0.], [0., 0.]])
+    outs = model.evaluate(x, y, sample_weight=w)
+    self.assertEqual(outs, [0., 0., 0.])

-      w = np.array([[3., 4.], [1., 2.]])
-      outs = model.evaluate(x, y, sample_weight=w)
-      self.assertArrayNear(outs, [0.3, 0.7, 0.7], .001)
+    w = np.array([[3., 4.], [1., 2.]])
+    outs = model.evaluate(x, y, sample_weight=w)
+    self.assertArrayNear(outs, [0.3, 0.7, 0.7], .001)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_metric_state_reset_between_fit_and_evaluate(self):
-    with self.test_session():
-      model = keras.Sequential()
-      model.add(keras.layers.Dense(3, activation='relu', input_dim=4))
-      model.add(keras.layers.Dense(1, activation='sigmoid'))
-      acc_obj = metrics_module.BinaryAccuracy()
-      model.compile(
-          loss='mae',
-          metrics=[acc_obj],
-          optimizer=RMSPropOptimizer(learning_rate=0.001))
+    model = keras.Sequential()
+    model.add(keras.layers.Dense(3, activation='relu', input_dim=4))
+    model.add(keras.layers.Dense(1, activation='sigmoid'))
+    acc_obj = metrics_module.BinaryAccuracy()
+    model.compile(
+        loss='mae',
+        metrics=[acc_obj],
+        optimizer=RMSPropOptimizer(learning_rate=0.001))

-      x_train = np.random.random((100, 4))
-      y_train = np.random.random((100, 1))
-      model.fit(x_train, y_train, batch_size=5, epochs=2)
-      self.assertEqual(self.evaluate(acc_obj.count), 100)
+    x_train = np.random.random((100, 4))
+    y_train = np.random.random((100, 1))
+    model.fit(x_train, y_train, batch_size=5, epochs=2)
+    self.assertEqual(self.evaluate(acc_obj.count), 100)

-      x_test = np.random.random((10, 4))
-      y_test = np.random.random((10, 1))
-      model.evaluate(x_test, y_test, batch_size=5)
-      self.assertEqual(self.evaluate(acc_obj.count), 10)
+    x_test = np.random.random((10, 4))
+    y_test = np.random.random((10, 1))
+    model.evaluate(x_test, y_test, batch_size=5)
+    self.assertEqual(self.evaluate(acc_obj.count), 10)

   @tf_test_util.run_in_graph_and_eager_modes
   def test_invalid_metrics(self):
     num_classes = 5
     input_dim = 5

-    with self.test_session():
-      model = keras.models.Sequential()
-      model.add(
-          keras.layers.Dense(10, activation='relu', input_shape=(input_dim,)))
-      model.add(keras.layers.Dense(num_classes, activation='softmax'))
+    model = testing_utils.get_small_sequential_mlp(
+        num_hidden=10, num_classes=num_classes, input_dim=input_dim)

-      with self.assertRaisesRegexp(
-          TypeError, 'Type of `metrics` argument not understood. '
-          'Expected a list or dictionary, found: '):
-        model.compile(
-            RMSPropOptimizer(learning_rate=0.001),
-            loss='categorical_crossentropy',
-            metrics=metrics_module.CategoricalAccuracy())
+    with self.assertRaisesRegexp(
+        TypeError, 'Type of `metrics` argument not understood. '
+        'Expected a list or dictionary, found: '):
+      model.compile(
+          RMSPropOptimizer(learning_rate=0.001),
+          loss='categorical_crossentropy',
+          metrics=metrics_module.CategoricalAccuracy())

   @tf_test_util.run_in_graph_and_eager_modes
   def test_metrics_masking(self):
diff --git a/tensorflow/python/keras/testing_utils.py b/tensorflow/python/keras/testing_utils.py
index 6e8ee06ff5..58405c550b 100644
--- a/tensorflow/python/keras/testing_utils.py
+++ b/tensorflow/python/keras/testing_utils.py
@@ -184,3 +184,22 @@ def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
   # for further checks in the caller function
   return actual_output
+
+
+def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
+  model = keras.models.Sequential()
+  if input_dim:
+    model.add(keras.layers.Dense(num_hidden, activation='relu',
+                                 input_dim=input_dim))
+  else:
+    model.add(keras.layers.Dense(num_hidden, activation='relu'))
+  activation = 'sigmoid' if num_classes == 1 else 'softmax'
+  model.add(keras.layers.Dense(num_classes, activation=activation))
+  return model
+
+
+def get_small_functional_mlp(num_hidden, num_classes, input_dim):
+  inputs = keras.Input(shape=(input_dim,))
+  outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
+  activation = 'sigmoid' if num_classes == 1 else 'softmax'
+  outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
+  return keras.Model(inputs, outputs)
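
For reference, a short usage sketch of the two helpers introduced above; the layer sizes are illustrative, and the assertions mirror `test_sequential_build_deferred` from the diff:

    from tensorflow.python.keras import testing_utils

    # Sequential variant: omitting input_dim defers shape inference until
    # build()/fit(), which the deferred-build cases in sequential_test.py use.
    seq = testing_utils.get_small_sequential_mlp(num_hidden=4, num_classes=5)
    seq.build((None, 10))
    assert seq.built and len(seq.weights) == 4

    # Functional variant: input_dim is required because keras.Input needs a
    # static shape. With num_classes=1, both helpers switch the output
    # activation from softmax to sigmoid.
    func = testing_utils.get_small_functional_mlp(
        num_hidden=1, num_classes=4, input_dim=3)

Centralizing model construction also lets the helpers pick a sigmoid head for binary outputs, which the earlier inline `Sequential((Dense(...), Dense(1, activation='sigmoid')))` in callbacks_test.py had to hand-roll.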