author    Zhenyu Tan <tanzheny@google.com>    2018-09-12 12:33:24 -0700
committer    TensorFlower Gardener <gardener@tensorflow.org>    2018-09-12 12:42:47 -0700
commit    3fb474713b27552eba1943bb4172e54ad2dd13bc (patch)
tree    affa9a8d670fe77de364dffda74584a194855ab7 /tensorflow/contrib/distribute
parent    28e945e590b07de137f318a70896bc4fc31f7053 (diff)
Add unit test for model_to_estimator where input_fn
returns features and labels as a list instead of a dict.
PiperOrigin-RevId: 212685344
Diffstat (limited to 'tensorflow/contrib/distribute')
-rw-r--r--    tensorflow/contrib/distribute/python/keras_test.py | 119
1 file changed, 119 insertions, 0 deletions
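
The commit message refers to input_fns that pass features and labels to model_to_estimator as lists rather than as dicts keyed by layer name (the new test in this diff exercises the dict form; the list-form test lives outside the path shown here). Below is a minimal, illustrative sketch of the list form, not part of the commit: the model, function names, and data shapes are made up, and it assumes a TF 1.x build in which model_to_estimator accepts positionally ordered feature/label tuples.

import numpy as np
import tensorflow as tf


def two_in_two_out_model():
  # Illustrative two-input, two-output Keras model (not from this diff).
  input_a = tf.keras.layers.Input(shape=(16,), name='input_a')
  input_b = tf.keras.layers.Input(shape=(16,), name='input_b')
  merged = tf.keras.layers.concatenate([input_a, input_b], name='merge')
  out_c = tf.keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
  out_d = tf.keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
  model = tf.keras.models.Model(inputs=[input_a, input_b],
                                outputs=[out_c, out_d])
  model.compile(loss='categorical_crossentropy',
                optimizer=tf.train.GradientDescentOptimizer(0.001))
  return model


def train_input_fn_as_list():
  np.random.seed(0)
  a = np.random.random((64, 16)).astype(np.float32)
  b = np.random.random((64, 16)).astype(np.float32)
  c = tf.keras.utils.to_categorical(np.random.randint(3, size=(64, 1)), 3)
  d = tf.keras.utils.to_categorical(np.random.randint(2, size=(64, 1)), 2)
  # Features and labels are positional tuples in the order of the model's
  # inputs/outputs, rather than dicts keyed by layer name.
  return tf.data.Dataset.from_tensor_slices(((a, b), (c, d))).batch(16)


est = tf.keras.estimator.model_to_estimator(keras_model=two_in_two_out_model())
est.train(input_fn=train_input_fn_as_list, steps=4)
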
diff --git a/tensorflow/contrib/distribute/python/keras_test.py b/tensorflow/contrib/distribute/python/keras_test.py
index 9e1762d92c..5f35e38189 100644
--- a/tensorflow/contrib/distribute/python/keras_test.py
+++ b/tensorflow/contrib/distribute/python/keras_test.py
@@ -34,6 +34,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
+from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
@@ -66,6 +67,32 @@ def simple_functional_model():
return model
+def multi_inputs_multi_outputs_model():
+ input_a = keras.layers.Input(shape=(16,), name='input_a')
+ input_b = keras.layers.Input(shape=(16,), name='input_b')
+ input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
+ dense = keras.layers.Dense(8, name='dense_1')
+
+ interm_a = dense(input_a)
+  # Convert the string input m to numbers before combining it with interm_a.
+ interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
+ interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
+ interm_b = dense(input_b)
+ merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
+ output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
+ output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
+ model = keras.models.Model(
+ inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
+ model.compile(
+ loss='categorical_crossentropy',
+ optimizer=gradient_descent.GradientDescentOptimizer(0.001),
+ metrics={
+ 'dense_2': 'categorical_accuracy',
+ 'dense_3': 'categorical_accuracy'
+ })
+ return model
+
+
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
@@ -94,6 +121,49 @@ def get_ds_test_input_fn():
return dataset
+def get_multi_inputs_multi_outputs_data():
+ (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(16,),
+ num_classes=3,
+ random_seed=_RANDOM_SEED)
+ (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(16,),
+ num_classes=2,
+ random_seed=_RANDOM_SEED)
+ (m_train, _), (m_test, _) = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(8,),
+ num_classes=2,
+ random_seed=_RANDOM_SEED)
+
+ c_train = keras.utils.to_categorical(c_train)
+ c_test = keras.utils.to_categorical(c_test)
+ d_train = keras.utils.to_categorical(d_train)
+ d_test = keras.utils.to_categorical(d_test)
+
+ train_data = {
+ 'input_a': a_train,
+ 'input_b': b_train,
+ 'input_m': m_train,
+ 'output_c': c_train,
+ 'output_d': d_train
+ }
+ test_data = {
+ 'input_a': a_test,
+ 'input_b': b_test,
+ 'input_m': m_test,
+ 'output_c': c_test,
+ 'output_d': d_test
+ }
+
+ return (train_data, test_data)
+
+
def batch_wrapper(dataset, batch_size, distribution):
# TPUs currently require fully defined input shapes, drop_remainder ensures
# the input will have fully defined shapes.
@@ -121,6 +191,8 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):
gfile.MakeDirs(self._base_dir)
self._config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
+ self._dist = mirrored_strategy.MirroredStrategy(
+ devices=['/device:GPU:0', '/device:GPU:1'])
def tearDown(self):
writer_cache.FileWriterCache.clear()
@@ -174,6 +246,53 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
+ def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):
+ train_data, test_data = get_multi_inputs_multi_outputs_data()
+
+ def train_input_fn():
+ input_dict = {
+ 'input_a': train_data['input_a'],
+ 'input_b': train_data['input_b'],
+ 'input_m': train_data['input_m'].astype(np.str)
+ }
+ output_dict = {
+ 'dense_2': train_data['output_c'],
+ 'dense_3': train_data['output_d']
+ }
+ return dataset_ops.Dataset.from_tensor_slices((input_dict,
+ output_dict)).batch(16)
+
+ def eval_input_fn():
+ input_dict = {
+ 'input_a': test_data['input_a'],
+ 'input_b': test_data['input_b'],
+ 'input_m': test_data['input_m'].astype(np.str)
+ }
+ output_dict = {
+ 'dense_2': test_data['output_c'],
+ 'dense_3': test_data['output_d']
+ }
+ return dataset_ops.Dataset.from_tensor_slices((input_dict,
+ output_dict)).batch(16)
+
+ self.do_test_multi_inputs_multi_outputs_with_input_fn(
+ train_input_fn, eval_input_fn)
+
+ def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,
+ eval_input_fn):
+ config = run_config_lib.RunConfig(
+ tf_random_seed=_RANDOM_SEED,
+ model_dir=self._base_dir,
+ train_distribute=self._dist)
+ with self.cached_session():
+ model = multi_inputs_multi_outputs_model()
+ est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
+ baseline_eval_results = est_keras.evaluate(
+ input_fn=eval_input_fn, steps=1)
+ est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
+ eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
+ self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
+
def test_keras_optimizer_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
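
For context, here is a standalone sketch, separate from the commit, of the pattern these tests exercise end to end: a compiled Keras model converted with model_to_estimator and trained under MirroredStrategy by passing the strategy through RunConfig(train_distribute=...). The model, seed, and device list are illustrative assumptions; adjust the devices to what the host actually has, and note this relies on TF 1.x with tf.contrib available.

import numpy as np
import tensorflow as tf


def simple_model():
  # Small single-input, single-output Keras model (illustrative only).
  inputs = tf.keras.layers.Input(shape=(16,), name='input_1')
  hidden = tf.keras.layers.Dense(16, activation='relu')(inputs)
  outputs = tf.keras.layers.Dense(3, activation='softmax', name='dense_1')(hidden)
  model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
  model.compile(
      loss='categorical_crossentropy',
      optimizer=tf.train.GradientDescentOptimizer(0.001),
      metrics=['categorical_accuracy'])
  return model


def input_fn():
  np.random.seed(0)
  x = np.random.random((64, 16)).astype(np.float32)
  y = tf.keras.utils.to_categorical(
      np.random.randint(3, size=(64, 1)), num_classes=3)
  # Features keyed by the Keras input layer name; a single-output model can
  # take labels as a plain array.
  return tf.data.Dataset.from_tensor_slices(({'input_1': x}, y)).batch(16)


strategy = tf.contrib.distribute.MirroredStrategy(
    devices=['/device:GPU:0', '/device:GPU:1'])
config = tf.estimator.RunConfig(tf_random_seed=0, train_distribute=strategy)
est = tf.keras.estimator.model_to_estimator(
    keras_model=simple_model(), config=config)
est.train(input_fn=input_fn, steps=4)
print(est.evaluate(input_fn=input_fn, steps=1))
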