path: root/tensorflow/python/estimator
author     Yan Facai (颜发才) <facai.yan@gmail.com>   2018-09-13 13:22:37 +0800
committer  Yan Facai (颜发才) <facai.yan@gmail.com>   2018-09-13 13:22:37 +0800
commit     04ddc2daf4c76bb4c99fdc6b582025e9a4ffba52 (patch)
tree       4dd8424588dc21f2e4d23a591325bde7d3b63a66 /tensorflow/python/estimator
parent     fd41d2c959372d7a068cb4474391362ef6a92fca (diff)
parent     845aaec5ec2191f2708247a09d9bad37f012f536 (diff)
Merge branch 'master' into ENH/feature_importances_for_boosted_tree
Diffstat (limited to 'tensorflow/python/estimator')
-rw-r--r--  tensorflow/python/estimator/BUILD                         |   2
-rw-r--r--  tensorflow/python/estimator/canned/boosted_trees.py       |  12
-rw-r--r--  tensorflow/python/estimator/canned/boosted_trees_test.py  |  16
-rw-r--r--  tensorflow/python/estimator/canned/dnn.py                 |  14
-rw-r--r--  tensorflow/python/estimator/canned/head_test.py           | 208
-rw-r--r--  tensorflow/python/estimator/estimator.py                  |  69
-rw-r--r--  tensorflow/python/estimator/inputs/numpy_io_test.py       |  34
-rw-r--r--  tensorflow/python/estimator/inputs/pandas_io_test.py      |  24
-rw-r--r--  tensorflow/python/estimator/keras.py                      |  75
-rw-r--r--  tensorflow/python/estimator/keras_test.py                 | 268
-rw-r--r--  tensorflow/python/estimator/model_fn.py                   |  43
-rw-r--r--  tensorflow/python/estimator/run_config.py                 |   7
12 files changed, 460 insertions, 312 deletions
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index 9fce172bee..bfcc019dd5 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -685,7 +685,7 @@ py_test(
srcs_version = "PY2AND3",
tags = [
"no_windows",
- "notsan",
+ "notsan", # b/67510291
],
deps = [
":keras",
diff --git a/tensorflow/python/estimator/canned/boosted_trees.py b/tensorflow/python/estimator/canned/boosted_trees.py
index 1c7e2189c2..812c892363 100644
--- a/tensorflow/python/estimator/canned/boosted_trees.py
+++ b/tensorflow/python/estimator/canned/boosted_trees.py
@@ -1138,8 +1138,11 @@ class BoostedTreesClassifier(_BoostedTrees):
bucketized_feature_2 = bucketized_column(
numeric_column('feature_2'), BUCKET_BOUNDARIES_2)
+ # Need to see a large portion of the data before we can build a layer; for
+ # example, half of the data: n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE
classifier = estimator.BoostedTreesClassifier(
feature_columns=[bucketized_feature_1, bucketized_feature_2],
+ n_batches_per_layer=n_batches_per_layer,
n_trees=100,
... <some other params>
)
@@ -1162,7 +1165,8 @@ class BoostedTreesClassifier(_BoostedTrees):
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
n_batches_per_layer: the number of batches to collect statistics per
- layer.
+ layer. The total number of batches is the total number of examples
+ divided by the batch size.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
@@ -1277,8 +1281,11 @@ class BoostedTreesRegressor(_BoostedTrees):
bucketized_feature_2 = bucketized_column(
numeric_column('feature_2'), BUCKET_BOUNDARIES_2)
+ # Need to see a large portion of the data before we can build a layer; for
+ # example, half of the data: n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE
regressor = estimator.BoostedTreesRegressor(
feature_columns=[bucketized_feature_1, bucketized_feature_2],
+ n_batches_per_layer=n_batches_per_layer,
n_trees=100,
... <some other params>
)
@@ -1301,7 +1308,8 @@ class BoostedTreesRegressor(_BoostedTrees):
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
n_batches_per_layer: the number of batches to collect statistics per
- layer.
+ layer. The total number of batches is the total number of examples
+ divided by the batch size.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
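To make the n_batches_per_layer arithmetic concrete, a minimal sketch (NUM_EXAMPLES and BATCH_SIZE are placeholder dataset constants, and the feature columns are the ones defined in the docstring example above):

import tensorflow as tf

NUM_EXAMPLES = 10000  # placeholder: total number of training examples
BATCH_SIZE = 100      # placeholder: examples per batch

# One pass over the data is NUM_EXAMPLES / BATCH_SIZE batches in total;
# collect statistics over half of the data before growing each layer.
n_batches_per_layer = int(0.5 * NUM_EXAMPLES / BATCH_SIZE)  # 50

regressor = tf.estimator.BoostedTreesRegressor(
    feature_columns=[bucketized_feature_1, bucketized_feature_2],
    n_batches_per_layer=n_batches_per_layer,
    n_trees=100)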
diff --git a/tensorflow/python/estimator/canned/boosted_trees_test.py b/tensorflow/python/estimator/canned/boosted_trees_test.py
index c1309fb809..1ce4f7d765 100644
--- a/tensorflow/python/estimator/canned/boosted_trees_test.py
+++ b/tensorflow/python/estimator/canned/boosted_trees_test.py
@@ -2087,7 +2087,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_classification())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -2120,7 +2120,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_classification_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
boosted_trees._create_classification_head(n_classes=2),
@@ -2160,7 +2160,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_classification())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train without train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -2193,7 +2193,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_classification_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
boosted_trees._create_classification_head(n_classes=2),
@@ -2231,7 +2231,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_regression())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -2261,7 +2261,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_regression_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -2301,7 +2301,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_regression())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train without train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -2331,7 +2331,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_regression_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
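The hunks above (and the analogous ones in head_test.py, numpy_io_test.py, and pandas_io_test.py below) are one mechanical substitution: self.test_session() is deprecated in favor of self.cached_session(), which keeps the same behavior but makes the caching explicit, one session created lazily and reused for the duration of the test method. A minimal sketch of the migrated pattern, under that assumption:

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class CachedSessionExample(test_util.TensorFlowTestCase):

  def testAddition(self):
    # cached_session creates the session once and reuses it for this
    # test method; previously this read `with self.test_session()`.
    with self.cached_session() as sess:
      total = constant_op.constant(1) + constant_op.constant(2)
      self.assertEqual(3, sess.run(total))


if __name__ == '__main__':
  test.main()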
diff --git a/tensorflow/python/estimator/canned/dnn.py b/tensorflow/python/estimator/canned/dnn.py
index c08cf61220..1c0c4581c0 100644
--- a/tensorflow/python/estimator/canned/dnn.py
+++ b/tensorflow/python/estimator/canned/dnn.py
@@ -142,7 +142,7 @@ def _dnn_model_fn(features,
dropout=None,
input_layer_partitioner=None,
config=None,
- tpu_estimator_spec=False,
+ use_tpu=False,
batch_norm=False):
"""Deep Neural Net model_fn.
@@ -164,8 +164,8 @@ def _dnn_model_fn(features,
input_layer_partitioner: Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
- tpu_estimator_spec: Whether to return a `_TPUEstimatorSpec` or
- or `model_fn.EstimatorSpec` instance.
+ use_tpu: Whether to make a DNN model able to run on TPU. If True, the
+ function returns a `_TPUEstimatorSpec` instance and disables variable
+ partitioning.
batch_norm: Whether to use batch normalization after each hidden layer.
Returns:
@@ -182,13 +182,15 @@ def _dnn_model_fn(features,
optimizer, learning_rate=_LEARNING_RATE)
num_ps_replicas = config.num_ps_replicas if config else 0
- partitioner = partitioned_variables.min_max_variable_partitioner(
- max_partitions=num_ps_replicas)
+ partitioner = (None if use_tpu else
+ partitioned_variables.min_max_variable_partitioner(
+ max_partitions=num_ps_replicas))
with variable_scope.variable_scope(
'dnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
+ None if use_tpu else
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
@@ -203,7 +205,7 @@ def _dnn_model_fn(features,
batch_norm=batch_norm)
logits = logit_fn(features=features, mode=mode)
- if tpu_estimator_spec:
+ if use_tpu:
return head._create_tpu_estimator_spec( # pylint: disable=protected-access
features=features,
mode=mode,
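The use_tpu rename couples the two TPU-specific behaviors to a single flag: returning a `_TPUEstimatorSpec` and skipping variable partitioning. A reduced sketch of the partitioner selection from the hunk above (`_choose_dnn_partitioner` is a hypothetical helper name used here for illustration):

from tensorflow.python.ops import partitioned_variables


def _choose_dnn_partitioner(use_tpu, num_ps_replicas):
  # As in the diff: no partitioning on TPU; otherwise partition large
  # variables across at most num_ps_replicas parameter-server shards.
  if use_tpu:
    return None
  return partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas)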
diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py
index bd2e0ae943..de9c84d2ef 100644
--- a/tensorflow/python/estimator/canned/head_test.py
+++ b/tensorflow/python/estimator/canned/head_test.py
@@ -260,7 +260,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
features={'x': np.array(((30.,), (42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
@@ -293,7 +293,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
@@ -347,14 +347,14 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesOpError('Labels must <= n_classes - 1'):
training_loss.eval({
labels_placeholder: labels_2x1_with_large_id,
logits_placeholder: logits_2x3
})
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesOpError('Labels must >= 0'):
training_loss.eval({
labels_placeholder: labels_2x1_with_negative_id,
@@ -413,7 +413,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
@@ -449,7 +449,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
spec.export_outputs.keys())
# Assert predictions and export_outputs.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
@@ -484,7 +484,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
@@ -510,7 +510,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
@@ -534,7 +534,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -561,7 +561,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
@@ -581,7 +581,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -632,7 +632,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -698,7 +698,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -727,7 +727,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -755,7 +755,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
}
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -804,7 +804,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -837,7 +837,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -866,7 +866,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -921,7 +921,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -962,7 +962,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
optimizer=_Optimizer())
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -992,7 +992,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
labels=np.array(((1,), (1,)), dtype=np.int64),
train_op_fn=_train_op_fn)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
@@ -1023,7 +1023,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
@@ -1064,7 +1064,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -1104,7 +1104,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels_rank_1)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -1153,7 +1153,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -1183,7 +1183,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -1211,7 +1211,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
train_op_fn=_train_op_fn)
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -1253,7 +1253,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -1292,7 +1292,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -1327,7 +1327,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -1353,7 +1353,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -1380,7 +1380,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -1413,7 +1413,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -1506,7 +1506,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
@@ -1536,7 +1536,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
@@ -1577,7 +1577,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[3 1\] \[labels_shape: \] \[2 1\]'):
@@ -1585,7 +1585,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels_placeholder: values_2x1,
logits_placeholder: values_3x1
})
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
@@ -1624,7 +1624,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
@@ -1660,7 +1660,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
@@ -1680,7 +1680,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -1733,7 +1733,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1808,7 +1808,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
}
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1832,7 +1832,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(41., training_loss.eval())
@@ -1849,7 +1849,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1877,7 +1877,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -1924,7 +1924,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
}
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1957,7 +1957,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -1983,7 +1983,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -2011,7 +2011,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
@@ -2031,7 +2031,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -2086,7 +2086,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -2126,7 +2126,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels=labels,
optimizer=_Optimizer())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss)
@@ -2153,7 +2153,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels=np.array(((1,), (1,),), dtype=np.float64),
train_op_fn=_train_op_fn)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
@@ -2182,7 +2182,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels=labels,
train_op_fn=_train_op_fn)
# Assert summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
@@ -2227,7 +2227,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -2254,7 +2254,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'Labels must <= n_classes - 1'):
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
training_loss.eval()
@@ -2277,7 +2277,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -2309,7 +2309,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
train_op_fn=_train_op_fn)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
@@ -2334,7 +2334,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -2360,7 +2360,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
expected_loss = 1.2484322
# Assert loss.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -2385,7 +2385,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(
@@ -2447,7 +2447,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -2483,7 +2483,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(),
@@ -2531,7 +2531,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
self.assertIsNotNone(spec.train_op)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((
@@ -2577,7 +2577,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
self.assertIsNotNone(spec.train_op)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((
@@ -2612,7 +2612,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(),
@@ -2649,7 +2649,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -2675,7 +2675,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -2700,7 +2700,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -2744,7 +2744,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
}
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -2825,7 +2825,7 @@ class RegressionHead(test.TestCase):
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({
logits_placeholder: logits_1d
@@ -2857,7 +2857,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.loss.eval({
labels_placeholder: values_3d,
@@ -2868,7 +2868,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'):
@@ -2908,7 +2908,7 @@ class RegressionHead(test.TestCase):
logits=logits_placeholder,
labels=labels_placeholder,
train_op_fn=lambda x: x)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.loss.eval({
labels_placeholder: values_3d,
@@ -2919,7 +2919,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'):
@@ -2957,7 +2957,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions.
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, spec.scaffold)
self.assertAllClose(logits, spec.predictions[prediction_key].eval())
self.assertAllClose(
@@ -2992,7 +2992,7 @@ class RegressionHead(test.TestCase):
spec.export_outputs.keys())
# Assert predictions.
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, spec.scaffold)
self.assertAllClose(
expected_predictions, spec.predictions[keys.PREDICTIONS].eval())
@@ -3019,7 +3019,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(43-45)^2, (44-41)^2] = [4, 9]
self.assertAllClose(13., training_loss.eval())
@@ -3045,7 +3045,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
@@ -3064,7 +3064,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -3112,7 +3112,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
@@ -3180,7 +3180,7 @@ class RegressionHead(test.TestCase):
}
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -3212,7 +3212,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3237,7 +3237,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3294,7 +3294,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3337,7 +3337,7 @@ class RegressionHead(test.TestCase):
labels=labels,
optimizer=_Optimizer())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss)
@@ -3364,7 +3364,7 @@ class RegressionHead(test.TestCase):
labels=np.array(((43.,), (44.,),), dtype=np.float64),
train_op_fn=_train_op_fn)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
@@ -3394,7 +3394,7 @@ class RegressionHead(test.TestCase):
train_op_fn=_train_op_fn)
# Assert summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
@@ -3441,7 +3441,7 @@ class RegressionHead(test.TestCase):
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
@@ -3487,7 +3487,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
@@ -3523,7 +3523,7 @@ class RegressionHead(test.TestCase):
labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
# Assert loss.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
@@ -3565,7 +3565,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3600,7 +3600,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3648,7 +3648,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3679,7 +3679,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6
@@ -3718,7 +3718,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
@@ -3750,7 +3750,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6
@@ -3796,7 +3796,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Evaluate predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3857,7 +3857,7 @@ class RegressionHead(test.TestCase):
self.assertIsNone(spec.train_op)
_assert_no_hooks(self, spec)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Finalize graph and initialize variables.
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
@@ -3915,7 +3915,7 @@ class RegressionHead(test.TestCase):
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertIsNotNone(spec.train_op)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Finalize graph and initialize variables.
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
@@ -3955,7 +3955,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3988,7 +3988,7 @@ class RegressionHead(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_loss, spec.loss.eval())
@@ -4013,7 +4013,7 @@ class RegressionHead(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -4042,7 +4042,7 @@ class RegressionHead(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py
index 44a60495d8..0f20acefdf 100644
--- a/tensorflow/python/estimator/estimator.py
+++ b/tensorflow/python/estimator/estimator.py
@@ -35,7 +35,6 @@ from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export as export_helpers
-from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
@@ -46,7 +45,6 @@ from tensorflow.python.keras import metrics
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
-from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
@@ -958,7 +956,12 @@ class Estimator(object):
mode=mode,
config=self.config)
- export_outputs = self._get_export_outputs_for_spec(estimator_spec)
+ export_outputs = model_fn_lib.export_outputs_for_mode(
+ mode=estimator_spec.mode,
+ serving_export_outputs=estimator_spec.export_outputs,
+ predictions=estimator_spec.predictions,
+ loss=estimator_spec.loss,
+ metrics=estimator_spec.eval_metric_ops)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = export_helpers.build_all_signature_defs(
@@ -1015,45 +1018,6 @@ class Estimator(object):
else:
builder.add_meta_graph(**meta_graph_kwargs)
- def _get_export_outputs_for_spec(self, estimator_spec):
- """Given an `EstimatorSpec`, determine what our export outputs should be.
-
- `EstimatorSpecs` contains `export_outputs` that are used for serving, but
- for
- training and eval graphs, we must wrap the tensors of interest in
- appropriate `tf.estimator.export.ExportOutput` objects.
-
- Args:
- estimator_spec: `tf.estimator.EstimatorSpec` object that will be exported.
-
- Returns:
- a dict mapping `export_output_name` to `tf.estimator.export.ExportOutput`
- object.
-
- Raises:
- ValueError: if an appropriate `ExportOutput` cannot be found for the
- passed `EstimatorSpec.mode`
- """
- mode = estimator_spec.mode
- if mode == model_fn_lib.ModeKeys.PREDICT:
- outputs = estimator_spec.export_outputs
- else:
- if mode == model_fn_lib.ModeKeys.TRAIN:
- output_class = export_output.TrainOutput
- elif mode == model_fn_lib.ModeKeys.EVAL:
- output_class = export_output.EvalOutput
- else:
- raise ValueError(
- 'Export output type not found for mode: {}'.format(mode))
-
- export_out = output_class(
- loss=estimator_spec.loss,
- predictions=estimator_spec.predictions,
- metrics=estimator_spec.eval_metric_ops)
- outputs = {mode: export_out}
-
- return outputs
-
def _get_features_from_input_fn(self, input_fn, mode):
"""Extracts the `features` from return values of `input_fn`."""
result = self._call_input_fn(input_fn, mode)
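For reference, the mode-to-ExportOutput mapping that the deleted `_get_export_outputs_for_spec` implemented, and that the `model_fn_lib.export_outputs_for_mode` call above is expected to reproduce, can be sketched from the removed body (`_export_outputs_sketch` is a hypothetical name):

from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output


def _export_outputs_sketch(mode, serving_export_outputs,
                           predictions, loss, metrics):
  # PREDICT: pass through the serving export_outputs unchanged.
  if mode == model_fn_lib.ModeKeys.PREDICT:
    return serving_export_outputs
  # TRAIN/EVAL: wrap the tensors of interest in the matching ExportOutput.
  if mode == model_fn_lib.ModeKeys.TRAIN:
    output_class = export_output.TrainOutput
  elif mode == model_fn_lib.ModeKeys.EVAL:
    output_class = export_output.EvalOutput
  else:
    raise ValueError('Export output type not found for mode: {}'.format(mode))
  return {mode: output_class(
      loss=loss, predictions=predictions, metrics=metrics)}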
@@ -1644,21 +1608,6 @@ def maybe_overwrite_model_dir_and_session_config(config, model_dir):
return config
-def create_per_tower_ready_op(scaffold):
- """Create a `tf.train.Scaffold.ready_op` inside a tower."""
- if scaffold.ready_op:
- return scaffold.ready_op
-
- def default_ready_op():
- return array_ops.concat([
- variables.report_uninitialized_variables(),
- resources.report_uninitialized_resources()
- ], 0)
-
- return monitored_session.Scaffold.get_or_default(
- 'ready_op', ops.GraphKeys.READY_OP, default_ready_op)
-
-
def create_per_tower_ready_for_local_init_op(scaffold):
"""Create a `tf.train.Scaffold.ready_for_local_init_op` inside a tower."""
if scaffold.ready_for_local_init_op:
@@ -1708,11 +1657,9 @@ def _combine_distributed_scaffold(grouped_scaffold, distribution):
return value[0]
ready_op = distribution.call_for_each_tower(
- create_per_tower_ready_op, grouped_scaffold)
+ lambda scaffold: scaffold.ready_op, grouped_scaffold)
if ready_op is not None:
ready_op = _unwrap_and_concat(ready_op)
- else:
- ready_op = None
ready_for_local_init_op = distribution.call_for_each_tower(
create_per_tower_ready_for_local_init_op, grouped_scaffold)
@@ -2109,7 +2056,7 @@ class WarmStartSettings(
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
`tf.estimator.VocabInfo`. The variable names should be "full" variables,
not the names of the partitions. If not explicitly provided, the variable
- is assumed to have no vocabulary.
+ is assumed to have no (changes to) vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
not explicitly provided, the name of the variable is assumed to be same
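The reworded var_name_to_vocab_info doc matters in practice: a variable with a `VocabInfo` entry has its rows remapped from the old vocabulary to the new one, while every other warm-started variable is loaded from the checkpoint unchanged. A hedged usage sketch (paths, sizes, and the variable name are placeholders; field names follow the public `tf.estimator.VocabInfo` API):

import tensorflow as tf

vocab_info = tf.estimator.VocabInfo(
    new_vocab='new_vocab.txt',   # placeholder vocabulary file
    new_vocab_size=1000,
    num_oov_buckets=1,
    old_vocab='old_vocab.txt',   # placeholder vocabulary file
    old_vocab_size=800)

ws = tf.estimator.WarmStartSettings(
    ckpt_to_initialize_from='/tmp/prev_model',  # placeholder checkpoint
    var_name_to_vocab_info={
        # Only this variable gets vocabulary remapping; the rest are
        # assumed to have no (changes to) vocabulary.
        'input_layer/words_embedding/embedding_weights': vocab_info,
    })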
diff --git a/tensorflow/python/estimator/inputs/numpy_io_test.py b/tensorflow/python/estimator/inputs/numpy_io_test.py
index 4e7b00b307..632908415f 100644
--- a/tensorflow/python/estimator/inputs/numpy_io_test.py
+++ b/tensorflow/python/estimator/inputs/numpy_io_test.py
@@ -42,7 +42,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -68,7 +68,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
features, target = input_fn()
@@ -93,7 +93,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=0)
features, target = input_fn()
@@ -114,7 +114,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -27)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -150,7 +150,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -29)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=3)
features, target = input_fn()
@@ -196,7 +196,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -221,7 +221,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -240,7 +240,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithXAsNonDict(self):
x = list(range(32, 36))
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(TypeError, 'x must be a dict or array'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -249,7 +249,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithXIsEmptyDict(self):
x = {}
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
@@ -257,7 +257,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithXIsEmptyArray(self):
x = np.array([[], []])
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
@@ -268,7 +268,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = None
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor = input_fn()
@@ -291,7 +291,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithNonBoolShuffle(self):
x = np.arange(32, 36)
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError,
'shuffle must be provided and explicitly '
'set as boolean'):
@@ -303,7 +303,7 @@ class NumpyIoTest(test.TestCase):
x = {'__target_key__': array}
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
input_fn()
@@ -318,7 +318,7 @@ class NumpyIoTest(test.TestCase):
x_mismatch_length = {'a': np.arange(1), 'b': b}
y_longer_length = np.arange(10)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
@@ -341,7 +341,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = {'y1': np.arange(-32, -28), 'y2': np.arange(32, 28, -1)}
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor, targets_tensor = input_fn()
@@ -369,7 +369,7 @@ class NumpyIoTest(test.TestCase):
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, 'y cannot be empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
@@ -379,7 +379,7 @@ class NumpyIoTest(test.TestCase):
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {'y1': np.arange(-32, -28), 'a': a, 'y2': np.arange(32, 28, -1), 'b': b}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, '2 duplicate keys are found in both x and y'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
diff --git a/tensorflow/python/estimator/inputs/pandas_io_test.py b/tensorflow/python/estimator/inputs/pandas_io_test.py
index 6f13bc95d2..9e69fc72dc 100644
--- a/tensorflow/python/estimator/inputs/pandas_io_test.py
+++ b/tensorflow/python/estimator/inputs/pandas_io_test.py
@@ -102,7 +102,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -116,7 +116,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFnWhenYIsDataFrame_ProducesExpectedOutput(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -131,7 +131,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumns(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'b'})
input_fn = pandas_io.pandas_input_fn(
@@ -147,7 +147,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumnsInTargets(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'a_n'})
input_fn = pandas_io.pandas_input_fn(
@@ -163,7 +163,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
@@ -191,7 +191,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
@@ -230,7 +230,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
@@ -243,7 +243,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -266,7 +266,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
@@ -276,7 +276,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
@@ -286,7 +286,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
@@ -297,7 +297,7 @@ class PandasIoTest(test.TestCase):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
diff --git a/tensorflow/python/estimator/keras.py b/tensorflow/python/estimator/keras.py
index 6361c6acc1..6b2765be82 100644
--- a/tensorflow/python/estimator/keras.py
+++ b/tensorflow/python/estimator/keras.py
@@ -182,10 +182,58 @@ def _clone_and_build_model(mode,
K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
input_tensors, target_tensors = _convert_estimator_io_to_keras(
keras_model, features, labels)
- return models.clone_and_build_model(
+
+ compile_clone = (mode != model_fn_lib.ModeKeys.PREDICT)
+
+ global_step = None
+ if compile_clone:
+ # Set iterations to the global step created by tf.train.create_global_step(),
+ # which the estimator framework runs automatically.
+ global_step = training_util.get_or_create_global_step()
+ K.track_variable(global_step)
+
+ clone = models.clone_and_build_model(
keras_model, input_tensors, target_tensors, custom_objects,
- compile_clone=(mode != model_fn_lib.ModeKeys.PREDICT),
- in_place_reset=(not keras_model._is_graph_network))
+ compile_clone=compile_clone,
+ in_place_reset=(not keras_model._is_graph_network),
+ optimizer_iterations=global_step)
+
+ return clone
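Aliasing the Keras optimizer's iteration counter to the global step is what makes each training step advance it. A conceptual TF 1.x sketch of the same mechanism (a standalone illustration, not code from this patch):

    import tensorflow as tf

    # A variable wired in as the optimizer's step counter is incremented by
    # every training step; the change above relies on the same effect.
    step = tf.train.get_or_create_global_step()
    w = tf.Variable(1.0)
    loss = tf.square(w)
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=step)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)
        print(sess.run(step))  # 1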
+
+
+def _convert_keras_metrics_to_estimator(model):
+ """Convert metrics from a Keras model to ops used by the Estimator framework.
+
+ Args:
+ model: A `tf.keras.Model` object.
+
+ Returns:
+ A dictionary mapping metric names to (value, update) op tuples, or `None`
+ if the model does not contain any metrics.
+ """
+ if not getattr(model, 'metrics', None):
+ return None
+
+ # TODO(psv/fchollet): support stateful metrics
+ eval_metric_ops = {}
+ # When each metric maps to an output
+ if isinstance(model.metrics, dict):
+ for i, output_name in enumerate(model.metrics.keys()):
+ metric_name = model.metrics[output_name]
+ if callable(metric_name):
+ metric_name = metric_name.__name__
+ # When some outputs use the same metric
+ if list(model.metrics.values()).count(metric_name) > 1:
+ metric_name += '_' + output_name
+ eval_metric_ops[metric_name] = metrics_module.mean(
+ model.metrics_tensors[i - len(model.metrics)])
+ else:
+ for i, metric_name in enumerate(model.metrics):
+ if callable(metric_name):
+ metric_name = metric_name.__name__
+ eval_metric_ops[metric_name] = metrics_module.mean(
+ model.metrics_tensors[i])
+ return eval_metric_ops
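The duplicate-name handling above can be shown with a tiny standalone sketch; `metrics` below is a hypothetical stand-in for `model.metrics` on a two-output model:

    # Plain Python, no TensorFlow needed: two outputs share the 'mae' metric,
    # so each eval_metric_ops key gets the output name appended to stay unique.
    metrics = {'dense_2': 'mae', 'dense_3': 'mae'}
    keys = []
    for output_name, metric_name in metrics.items():
        if list(metrics.values()).count(metric_name) > 1:
            metric_name += '_' + output_name
        keys.append(metric_name)
    print(keys)  # ['mae_dense_2', 'mae_dense_3']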
def _create_keras_model_fn(keras_model, custom_objects=None):
@@ -237,26 +285,7 @@ def _create_keras_model_fn(keras_model, custom_objects=None):
model._make_test_function() # pylint: disable=protected-access
loss = model.total_loss
- if model.metrics:
- # TODO(psv/fchollet): support stateful metrics
- eval_metric_ops = {}
- # When each metric maps to an output
- if isinstance(model.metrics, dict):
- for i, output_name in enumerate(model.metrics.keys()):
- metric_name = model.metrics[output_name]
- if callable(metric_name):
- metric_name = metric_name.__name__
- # When some outputs use the same metric
- if list(model.metrics.values()).count(metric_name) > 1:
- metric_name += '_' + output_name
- eval_metric_ops[metric_name] = metrics_module.mean(
- model.metrics_tensors[i - len(model.metrics)])
- else:
- for i, metric_name in enumerate(model.metrics):
- if callable(metric_name):
- metric_name = metric_name.__name__
- eval_metric_ops[metric_name] = metrics_module.mean(
- model.metrics_tensors[i])
+ eval_metric_ops = _convert_keras_metrics_to_estimator(model)
# Set train_op only during train.
if mode is model_fn_lib.ModeKeys.TRAIN:
diff --git a/tensorflow/python/estimator/keras_test.py b/tensorflow/python/estimator/keras_test.py
index 290c4604ce..3758243d7b 100644
--- a/tensorflow/python/estimator/keras_test.py
+++ b/tensorflow/python/estimator/keras_test.py
@@ -26,20 +26,23 @@ import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
+from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import keras as keras_lib
+from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config as run_config_lib
-from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizers import SGD
from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import rmsprop
from tensorflow.python.training import session_run_hook
+from tensorflow.python.training import training_util
try:
@@ -90,6 +93,58 @@ def simple_subclassed_model():
return SimpleModel()
+def gen_input_fn(x, y=None, batch_size=128, num_epochs=1, shuffle=False):
+ def input_fn():
+ ds = dataset_ops.Dataset.from_tensor_slices((x, y) if y is not None else x)
+ if shuffle:
+ ds = ds.shuffle(1000)
+ return ds.repeat(num_epochs).batch(batch_size)
+ return input_fn
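A usage sketch for this test helper; the feature name and the estimator `est` are hypothetical:

    # Wrap in-memory numpy data as a tf.data-based input_fn for an estimator.
    x = {'input_1': np.random.random((100, 16)).astype(np.float32)}
    y = np.random.random((100, 2)).astype(np.float32)
    est.train(input_fn=gen_input_fn(x, y, batch_size=16, num_epochs=2))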
+
+
+def get_multi_inputs_multi_outputs_data():
+ (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(16,),
+ num_classes=3,
+ random_seed=_RANDOM_SEED)
+ (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(16,),
+ num_classes=2,
+ random_seed=_RANDOM_SEED)
+ (m_train, _), (m_test, _) = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(8,),
+ num_classes=2,
+ random_seed=_RANDOM_SEED)
+
+ c_train = keras.utils.to_categorical(c_train)
+ c_test = keras.utils.to_categorical(c_test)
+ d_train = keras.utils.to_categorical(d_train)
+ d_test = keras.utils.to_categorical(d_test)
+
+ train_data = {
+ 'input_a': a_train,
+ 'input_b': b_train,
+ 'input_m': m_train,
+ 'output_c': c_train,
+ 'output_d': d_train
+ }
+ test_data = {
+ 'input_a': a_test,
+ 'input_b': b_test,
+ 'input_m': m_test,
+ 'output_c': c_test,
+ 'output_d': d_test
+ }
+
+ return (train_data, test_data)
+
+
def get_resource_for_simple_model(model_type='sequential',
is_evaluate=False,):
if model_type == 'sequential':
@@ -117,19 +172,19 @@ def get_resource_for_simple_model(model_type='sequential',
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
- train_input_fn = numpy_io.numpy_input_fn(
+ train_input_fn = gen_input_fn(
x=randomize_io_type(x_train, input_name),
y=randomize_io_type(y_train, output_name),
shuffle=False,
num_epochs=None,
batch_size=16)
- evaluate_input_fn = numpy_io.numpy_input_fn(
+ evaluate_input_fn = gen_input_fn(
x=randomize_io_type(x_test, input_name),
y=randomize_io_type(y_test, output_name),
num_epochs=1, shuffle=False)
- predict_input_fn = numpy_io.numpy_input_fn(
+ predict_input_fn = gen_input_fn(
x=randomize_io_type(x_test, input_name), num_epochs=1, shuffle=False)
inference_input_fn = evaluate_input_fn if is_evaluate else predict_input_fn
@@ -147,20 +202,21 @@ def randomize_io_type(array, name):
def multi_inputs_multi_outputs_model():
- a = keras.layers.Input(shape=(16,), name='input_a')
- b = keras.layers.Input(shape=(16,), name='input_b')
- m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
+ input_a = keras.layers.Input(shape=(16,), name='input_a')
+ input_b = keras.layers.Input(shape=(16,), name='input_b')
+ input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
- a_2 = dense(a)
+ interm_a = dense(input_a)
# Read m
- m_2 = keras.layers.Lambda(gen_parsing_ops.string_to_number)(m)
- s_2 = keras.layers.Lambda(lambda k: k[0] * k[1])([m_2, a_2])
- b_2 = dense(b)
- merged = keras.layers.concatenate([s_2, b_2], name='merge')
- c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
- d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
- model = keras.models.Model(inputs=[a, b, m], outputs=[c, d])
+ interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
+ interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
+ interm_b = dense(input_b)
+ merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
+ output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
+ output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
+ model = keras.models.Model(
+ inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
@@ -203,7 +259,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer='rmsprop',
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
before_eval_results = est_keras.evaluate(
@@ -228,7 +284,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
metrics=['mse', keras.metrics.categorical_accuracy])
my_hook = MyHook()
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
before_eval_results = est_keras.evaluate(
@@ -252,7 +308,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer=rmsprop.RMSPropOptimizer(1e-3),
metrics=['mse', keras.metrics.categorical_accuracy])
my_hook = MyHook()
- with self.test_session():
+ with self.cached_session():
keras_model.fit(x_train, y_train, epochs=1)
keras_est = keras_lib.model_to_estimator(
@@ -274,7 +330,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer=rmsprop.RMSPropOptimizer(1e-3),
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model,
config=self._config)
@@ -297,7 +353,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer=rmsprop.RMSPropOptimizer(1e-3),
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
@@ -316,7 +372,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer=rmsprop.RMSPropOptimizer(1e-3),
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
# Create state
keras_model.train_on_batch(np.random.random((10,) + _INPUT_SIZE),
np.random.random((10, _NUM_CLASS)))
@@ -343,7 +399,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
x_test, y_test), _, eval_input_fn = get_resource_for_simple_model(
model_type='functional', is_evaluate=True)
- with self.test_session():
+ with self.cached_session():
metrics = [
'binary_accuracy', 'binary_crossentropy', 'categorical_accuracy',
'categorical_crossentropy', 'cosine_proximity', 'hinge',
@@ -357,7 +413,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
keras_model.fit(x_train, y_train, epochs=1)
keras_eval = keras_model.evaluate(x_test, y_test, batch_size=32)
- with self.test_session():
+ with self.cached_session():
keras_est = keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
est_eval = keras_est.evaluate(input_fn=eval_input_fn)
@@ -385,7 +441,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
x_test, _), _, pred_input_fn = get_resource_for_simple_model(
model_type='sequential', is_evaluate=False)
- with self.test_session():
+ with self.cached_session():
keras_model.compile(
loss='categorical_crossentropy',
optimizer='adam',
@@ -393,7 +449,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
keras_model.fit(x_train, y_train, epochs=1)
keras_pred = [np.argmax(y) for y in keras_model.predict(x_test)]
- with self.test_session():
+ with self.cached_session():
keras_est = keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
est_pred = [
@@ -402,51 +458,85 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
]
self.assertAllEqual(est_pred, keras_pred)
- def test_multi_inputs_multi_outputs(self):
- np.random.seed(_RANDOM_SEED)
- (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
- train_samples=_TRAIN_SIZE,
- test_samples=50,
- input_shape=(16,),
- num_classes=3)
- np.random.seed(_RANDOM_SEED)
- (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
- train_samples=_TRAIN_SIZE,
- test_samples=50,
- input_shape=(16,),
- num_classes=2)
- np.random.seed(_RANDOM_SEED)
- (input_m_train, _), (input_m_test, _) = testing_utils.get_test_data(
- train_samples=_TRAIN_SIZE,
- test_samples=50,
- input_shape=(8,),
- num_classes=2)
-
- c_train = keras.utils.to_categorical(c_train)
- c_test = keras.utils.to_categorical(c_test)
- d_train = keras.utils.to_categorical(d_train)
- d_test = keras.utils.to_categorical(d_test)
+ def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):
+ train_data, test_data = get_multi_inputs_multi_outputs_data()
def train_input_fn():
- input_dict = {'input_a': a_train, 'input_b': b_train,
- 'input_m': input_m_train.astype(np.str)}
- output_dict = {'dense_2': c_train, 'dense_3': d_train}
+ input_dict = {
+ 'input_a': train_data['input_a'],
+ 'input_b': train_data['input_b'],
+ 'input_m': train_data['input_m'].astype(np.str)
+ }
+ output_dict = {
+ 'dense_2': train_data['output_c'],
+ 'dense_3': train_data['output_d']
+ }
return input_dict, output_dict
def eval_input_fn():
- input_dict = {'input_a': a_test, 'input_b': b_test,
- 'input_m': input_m_test.astype(np.str)}
- output_dict = {'dense_2': c_test, 'dense_3': d_test}
+ input_dict = {
+ 'input_a': test_data['input_a'],
+ 'input_b': test_data['input_b'],
+ 'input_m': test_data['input_m'].astype(np.str)
+ }
+ output_dict = {
+ 'dense_2': test_data['output_c'],
+ 'dense_3': test_data['output_d']
+ }
return input_dict, output_dict
- with self.test_session():
+ def pred_input_fn():
+ input_dict = {
+ 'input_a': test_data['input_a'],
+ 'input_b': test_data['input_b'],
+ 'input_m': test_data['input_m'].astype(np.str)
+ }
+ return input_dict
+
+ self.do_test_multi_inputs_multi_outputs_with_input_fn(
+ train_input_fn, eval_input_fn, pred_input_fn)
+
+ def test_multi_inputs_multi_outputs_with_input_fn_as_list(self):
+ train_data, test_data = get_multi_inputs_multi_outputs_data()
+
+ def train_input_fn():
+ input_list = [
+ train_data['input_a'], train_data['input_b'],
+ train_data['input_m'].astype(np.str)
+ ]
+ output_list = [train_data['output_c'], train_data['output_d']]
+ return input_list, output_list
+
+ def eval_input_fn():
+ input_list = [
+ test_data['input_a'], test_data['input_b'],
+ test_data['input_m'].astype(np.str)
+ ]
+ output_list = [test_data['output_c'], test_data['output_d']]
+ return input_list, output_list
+
+ def pred_input_fn():
+ input_list = [
+ test_data['input_a'], test_data['input_b'],
+ test_data['input_m'].astype(np.str)
+ ]
+ return input_list
+
+ self.do_test_multi_inputs_multi_outputs_with_input_fn(
+ train_input_fn, eval_input_fn, pred_input_fn)
+
+ def do_test_multi_inputs_multi_outputs_with_input_fn(
+ self, train_input_fn, eval_input_fn, pred_input_fn):
+ with self.cached_session():
model = multi_inputs_multi_outputs_model()
est_keras = keras_lib.model_to_estimator(
keras_model=model, config=self._config)
- before_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
+ baseline_eval_results = est_keras.evaluate(
+ input_fn=eval_input_fn, steps=1)
est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
- after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
- self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
+ eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
+ self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
+ est_keras.predict(input_fn=pred_input_fn)
def test_init_from_file(self):
if h5py is None:
@@ -456,7 +546,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
x_test, _), _, pred_input_fn = get_resource_for_simple_model(
model_type='functional', is_evaluate=False)
- with self.test_session():
+ with self.cached_session():
keras_model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
@@ -466,7 +556,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
fname = os.path.join(self._base_dir, 'keras_model.h5')
keras.models.save_model(keras_model, fname)
- with self.test_session():
+ with self.cached_session():
keras_est = keras_lib.model_to_estimator(
keras_model_path=fname, config=self._config)
est_pred = [
@@ -479,19 +569,19 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
with self.assertRaisesRegexp(ValueError, 'Either'):
keras_lib.model_to_estimator()
- with self.test_session():
+ with self.cached_session():
keras_model = simple_sequential_model()
with self.assertRaisesRegexp(ValueError, 'not both'):
keras_lib.model_to_estimator(
keras_model=keras_model,
keras_model_path=tempfile.mkdtemp(dir=self._base_dir))
- with self.test_session():
+ with self.cached_session():
keras_model = simple_sequential_model()
with self.assertRaisesRegexp(ValueError, 'compiled'):
keras_lib.model_to_estimator(keras_model=keras_model)
- with self.test_session():
+ with self.cached_session():
keras_model = simple_sequential_model()
with self.assertRaisesRegexp(ValueError, 'not a local path'):
keras_lib.model_to_estimator(
@@ -516,10 +606,10 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
model = simple_functional_model()
model.compile(
loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=model, config=self._config)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(KeyError,
'Difference: .*invalid_input_name'):
est_keras.train(input_fn=invald_input_name_input_fn, steps=100)
@@ -547,20 +637,20 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
y_train = keras.utils.to_categorical(y_train, 2)
input_name = keras_model.input_names[0]
output_name = keras_model.output_names[0]
- train_input_fn = numpy_io.numpy_input_fn(
+ train_input_fn = gen_input_fn(
x=randomize_io_type(x_train, input_name),
y=randomize_io_type(y_train, output_name),
shuffle=False,
num_epochs=None,
batch_size=16)
with self.assertRaisesRegexp(ValueError, 'relu6'):
- with self.test_session():
+ with self.cached_session():
est = keras_lib.model_to_estimator(
keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir))
est.train(input_fn=train_input_fn, steps=1)
- with self.test_session():
+ with self.cached_session():
est = keras_lib.model_to_estimator(
keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir),
@@ -586,7 +676,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
}
})
with test.mock.patch.dict('os.environ', {'TF_CONFIG': tf_config}):
- with self.test_session():
+ with self.cached_session():
keras_lib.model_to_estimator(
keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir))
@@ -602,7 +692,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
gpu_options = config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.3)
sess_config = config_pb2.ConfigProto(gpu_options=gpu_options)
self._config._session_config = sess_config
- with self.test_session():
+ with self.cached_session():
keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
self.assertEqual(
@@ -618,7 +708,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer='rmsprop',
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, model_dir=self._base_dir,
config=run_config_lib.RunConfig())
@@ -629,7 +719,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
self.assertEqual(self._base_dir, est_keras._config.model_dir)
self.assertEqual(self._base_dir, est_keras._model_dir)
- with self.test_session():
+ with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, model_dir=self._base_dir,
config=None)
@@ -648,7 +738,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer='rmsprop',
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
with test.mock.patch.object(tempfile, 'mkdtemp', return_value=_TMP_DIR):
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model,
@@ -663,7 +753,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
optimizer='rmsprop',
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, '`model_dir` are set both in '
'constructor and `RunConfig`'):
keras_lib.model_to_estimator(
@@ -676,7 +766,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
loss='categorical_crossentropy',
optimizer=rmsprop.RMSPropOptimizer(1e-3),
metrics=['mse', keras.metrics.categorical_accuracy])
- with self.test_session():
+ with self.cached_session():
keras_model.train_on_batch(
np.random.random((10,) + _INPUT_SIZE),
np.random.random((10, _NUM_CLASS)))
@@ -690,6 +780,32 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
keras_lib.model_to_estimator(
keras_model=keras_model, config=self._config)
+ def assert_increasing_global_step(self, optimizer):
+ keras_model, _, _, train_input_fn, _ = get_resource_for_simple_model(
+ model_type='sequential', is_evaluate=True)
+ keras_model.compile(
+ loss='categorical_crossentropy',
+ optimizer=optimizer,
+ metrics=['mse', keras.metrics.categorical_accuracy])
+ with self.cached_session() as sess:
+ keras_model_fn = keras_lib._create_keras_model_fn(keras_model)
+ global_step = training_util.create_global_step()
+ features, labels = train_input_fn().make_one_shot_iterator().get_next()
+ spec = keras_model_fn(features, labels, mode=model_fn_lib.ModeKeys.TRAIN)
+
+ sess.run(variables.global_variables_initializer())
+ sess.run(variables.local_variables_initializer())
+
+ self.assertEqual(global_step.eval(), 0) # Sanity check
+ sess.run(spec.train_op)
+ self.assertEqual(global_step.eval(), 1)
+
+ def test_model_fn_increments_global_step_tf_optimizer(self):
+ self.assert_increasing_global_step(rmsprop.RMSPropOptimizer(1e-3))
+
+ def test_model_fn_increments_global_step_keras_optimizer(self):
+ self.assert_increasing_global_step('rmsprop')
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/estimator/model_fn.py b/tensorflow/python/estimator/model_fn.py
index fd2787aeaf..439cc2e3a4 100644
--- a/tensorflow/python/estimator/model_fn.py
+++ b/tensorflow/python/estimator/model_fn.py
@@ -142,7 +142,7 @@ class EstimatorSpec(
prediction.
predictions: Predictions `Tensor` or dict of `Tensor`.
loss: Training loss `Tensor`. Must be either scalar, or with shape `[1]`.
- train_op: Op to run one training step.
+ train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name.
The values of the dict can be one of the following:
(1) instance of `Metric` class.
@@ -475,3 +475,44 @@ def _check_is_tensor(x, tensor_name):
if not isinstance(x, ops.Tensor):
raise TypeError('{} must be Tensor, given: {}'.format(tensor_name, x))
return x
+
+
+def export_outputs_for_mode(
+ mode, serving_export_outputs=None, predictions=None, loss=None,
+ metrics=None):
+ """Util function for constructing a `ExportOutput` dict given a mode.
+
+ The returned dict can be passed directly to the `build_all_signature_defs`
+ helper function as the `export_outputs` argument, which is used for
+ generating a SignatureDef map.
+
+ Args:
+ mode: A `ModeKeys` specifying the mode.
+ serving_export_outputs: Describes the output signatures to be exported to
+ `SavedModel` and used during serving. Should be a dict or None.
+ predictions: A dict of Tensors or a single Tensor representing model
+ predictions. This argument is only used if `serving_export_outputs` is
+ not set.
+ loss: A dict of Tensors or single Tensor representing calculated loss.
+ metrics: A dict of (metric_value, update_op) tuples, or a single tuple.
+ metric_value must be a Tensor, and update_op must be a Tensor or Op.
+
+ Returns:
+ A dictionary mapping a key to a `tf.estimator.export.ExportOutput` object.
+ The key is the expected SignatureDef key for the mode.
+
+ Raises:
+ ValueError: if an appropriate ExportOutput cannot be found for the mode.
+ """
+ # TODO(b/113185250): move all model export helper functions into a util file.
+ if mode == ModeKeys.PREDICT:
+ return _get_export_outputs(serving_export_outputs, predictions)
+ elif mode == ModeKeys.TRAIN:
+ return {mode: export_output_lib.TrainOutput(
+ loss=loss, predictions=predictions, metrics=metrics)}
+ elif mode == ModeKeys.EVAL:
+ return {mode: export_output_lib.EvalOutput(
+ loss=loss, predictions=predictions, metrics=metrics)}
+ else:
+ raise ValueError(
+ 'Export output type not found for mode: {}'.format(mode))
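A hedged sketch of calling this utility inside a model_fn for EVAL mode; `logits`, `loss`, and the metric ops are placeholders assumed to exist in the caller:

    # Sketch only: package eval-time tensors as export outputs.
    export_outputs = export_outputs_for_mode(
        mode=ModeKeys.EVAL,
        predictions={'logits': logits},
        loss=loss,
        metrics={'accuracy': (acc_value, acc_update)})
    # Yields {ModeKeys.EVAL: EvalOutput(...)}, ready to be passed as the
    # `export_outputs` argument of build_all_signature_defs.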
diff --git a/tensorflow/python/estimator/run_config.py b/tensorflow/python/estimator/run_config.py
index b1ca207b62..3773810a04 100644
--- a/tensorflow/python/estimator/run_config.py
+++ b/tensorflow/python/estimator/run_config.py
@@ -521,7 +521,12 @@ class RunConfig(object):
eval_distribute=eval_distribute,
experimental_distribute=experimental_distribute)
- if train_distribute or eval_distribute or experimental_distribute:
+ # TODO(frankchn,priyag): Eventually use distributed coordinator for TPUs.
+ if ((train_distribute and
+ train_distribute.__class__.__name__ != 'TPUStrategy') or
+ (eval_distribute and
+ eval_distribute.__class__.__name__ != 'TPUStrategy') or
+ experimental_distribute):
logging.info('Initializing RunConfig with distribution strategies.')
distribute_coordinator_training.init_run_config(self, tf_config)
else:
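For readability, the new condition restated as a small helper (a sketch mirroring the hunk above, not code from this patch):

    # The distribute coordinator path is taken only for non-TPU strategies;
    # TPUStrategy is excluded until the coordinator supports it (see TODO).
    def _is_non_tpu_strategy(strategy):
        return (strategy is not None and
                strategy.__class__.__name__ != 'TPUStrategy')

    init_coordinator = (_is_non_tpu_strategy(train_distribute) or
                        _is_non_tpu_strategy(eval_distribute) or
                        bool(experimental_distribute))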