author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-07-11 11:11:03 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-07-11 11:16:23 -0700
commit    e103c7b4fb2ac6ebf9472b3c2b01c35222872ae0 (patch)
tree      079b6e9e51c575ce5a03f01c3a19546ff2410688 /tensorflow/python/estimator/canned
parent    158cd6220231fcf758a45c2dcd40d93cd0aec9e0 (diff)
Add average prediction, average label to regression head.
PiperOrigin-RevId: 204154837
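For readers skimming the diff: the change adds two weighted-mean eval metrics (average prediction and average label) to the regression head, alongside the existing mean loss. Below is a minimal sketch of the idea using the public TF 1.x API; the function name and metric key strings are illustrative, and the real implementation lives in head.py and uses metric_keys.MetricKeys plus internal helpers.

    import tensorflow as tf

    def regression_eval_metric_ops(predicted_value, labels, weights=None):
      """Sketch of the regression head's eval metrics after this change."""
      # Per-example squared error, left unreduced so the metric can weight it.
      unreduced_loss = tf.losses.mean_squared_error(
          labels=labels,
          predictions=predicted_value,
          reduction=tf.losses.Reduction.NONE)
      return {
          'loss_mean': tf.metrics.mean(values=unreduced_loss, weights=weights),
          # New in this change: weighted average prediction and average label.
          'prediction_mean': tf.metrics.mean(values=predicted_value,
                                             weights=weights),
          'label_mean': tf.metrics.mean(values=labels, weights=weights),
      }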
Diffstat (limited to 'tensorflow/python/estimator/canned')
-rw-r--r--  tensorflow/python/estimator/canned/baseline_test.py          10
-rw-r--r--  tensorflow/python/estimator/canned/dnn_testing_utils.py       4
-rw-r--r--  tensorflow/python/estimator/canned/head.py                    22
-rw-r--r--  tensorflow/python/estimator/canned/head_test.py               31
-rw-r--r--  tensorflow/python/estimator/canned/linear_testing_utils.py   14
5 files changed, 63 insertions, 18 deletions
diff --git a/tensorflow/python/estimator/canned/baseline_test.py b/tensorflow/python/estimator/canned/baseline_test.py
index 7bf2e62da9..e46a3a156d 100644
--- a/tensorflow/python/estimator/canned/baseline_test.py
+++ b/tensorflow/python/estimator/canned/baseline_test.py
@@ -154,6 +154,8 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -176,6 +178,8 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -204,6 +208,8 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -229,7 +235,9 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
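The single-output expectations earlier in this file's diff (LOSS of 9/18/27 with LOSS_MEAN of 9) are consistent with a BaselineRegressor whose prediction is simply its trained bias. The concrete values below are read off the asserted metrics rather than the test fixtures, so treat this as an illustrative check:

    # Illustrative check of the single-output baseline expectations above.
    bias, label = 13.0, 10.0               # PREDICTION_MEAN and LABEL_MEAN
    per_example_loss = (bias - label) ** 2
    assert per_example_loss == 9.0         # LOSS_MEAN in each case
    assert 2 * per_example_loss == 18.0    # total LOSS over two weighted examples
    assert 3 * per_example_loss == 27.0    # total LOSS over three weighted examples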
diff --git a/tensorflow/python/estimator/canned/dnn_testing_utils.py b/tensorflow/python/estimator/canned/dnn_testing_utils.py
index ba17821259..de226ed0ef 100644
--- a/tensorflow/python/estimator/canned/dnn_testing_utils.py
+++ b/tensorflow/python/estimator/canned/dnn_testing_utils.py
@@ -1271,6 +1271,8 @@ class BaseDNNRegressorEvaluateTest(object):
self.assertAllClose({
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
+ metric_keys.MetricKeys.PREDICTION_MEAN: -2.08,
+ metric_keys.MetricKeys.LABEL_MEAN: 1.0,
ops.GraphKeys.GLOBAL_STEP: global_step
}, dnn_regressor.evaluate(input_fn=_input_fn, steps=1))
@@ -1301,6 +1303,8 @@ class BaseDNNRegressorEvaluateTest(object):
self.assertAllClose({
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / label_dimension,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 0.39 / 3.0,
+ metric_keys.MetricKeys.LABEL_MEAN: 0.5 / 3.0,
ops.GraphKeys.GLOBAL_STEP: global_step
}, dnn_regressor.evaluate(input_fn=_input_fn, steps=1))
diff --git a/tensorflow/python/estimator/canned/head.py b/tensorflow/python/estimator/canned/head.py
index b74ef1015c..da9a64c2bc 100644
--- a/tensorflow/python/estimator/canned/head.py
+++ b/tensorflow/python/estimator/canned/head.py
@@ -1398,15 +1398,21 @@ class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
weights=weights,
processed_labels=labels)
- def _eval_metric_ops(self, weights, unreduced_loss, regularization_loss):
+ def _eval_metric_ops(self, predicted_value, labels, weights, unreduced_loss,
+ regularization_loss):
"""Returns the Eval metric ops."""
keys = metric_keys.MetricKeys
# Estimator already adds a metric for loss.
eval_metric_ops = {
_summary_key(self._name, keys.LOSS_MEAN):
- metrics_lib.mean(
- values=unreduced_loss,
- weights=weights)
+ metrics_lib.mean(values=unreduced_loss, weights=weights),
+ _summary_key(self._name, keys.PREDICTION_MEAN):
+ _predictions_mean(
+ predictions=predicted_value,
+ weights=weights,
+ name=keys.PREDICTION_MEAN),
+ _summary_key(self._name, keys.LABEL_MEAN):
+ metrics_lib.mean(values=labels, weights=weights)
}
if regularization_loss is not None:
regularization_loss_key = _summary_key(
@@ -1489,13 +1495,13 @@ class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
predictions=predictions,
loss=regularized_training_loss,
eval_metrics=_create_eval_metrics_tuple(
- self._eval_metric_ops,
- {
+ self._eval_metric_ops, {
+ 'predicted_value': predicted_value,
+ 'labels': labels,
'weights': weights,
'unreduced_loss': unreduced_loss,
'regularization_loss': regularization_loss,
- }
- ))
+ }))
# Train.
if optimizer is not None:
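The `_predictions_mean` helper reused in the hunk above is an existing internal of head.py (already used by the classification heads). A minimal sketch of the behavior it is assumed to have, expressed with the public TF 1.x API rather than the internal modules:

    import tensorflow as tf

    def predictions_mean(predictions, weights=None, name=None):
      """Assumed behavior: a weighted streaming mean over predicted values."""
      with tf.name_scope(name, 'predictions_mean', (predictions, weights)):
        predictions = tf.to_float(predictions)
        return tf.metrics.mean(values=predictions, weights=weights)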
diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py
index 08ce5ca8e8..bd2e0ae943 100644
--- a/tensorflow/python/estimator/canned/head_test.py
+++ b/tensorflow/python/estimator/canned/head_test.py
@@ -3103,8 +3103,10 @@ class RegressionHead(test.TestCase):
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
- self.assertItemsEqual(
- (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
+ self.assertItemsEqual((metric_keys.MetricKeys.LOSS_MEAN,
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN),
+ spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
@@ -3140,6 +3142,9 @@ class RegressionHead(test.TestCase):
expected_metric_keys = [
'{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
+ '{}/some_regression_head'.format(
+ metric_keys.MetricKeys.PREDICTION_MEAN),
+ '{}/some_regression_head'.format(metric_keys.MetricKeys.LABEL_MEAN),
]
self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
@@ -3170,6 +3175,8 @@ class RegressionHead(test.TestCase):
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
+ keys.PREDICTION_MEAN: (45 + 41) / 2.0,
+ keys.LABEL_MEAN: (43 + 44) / 2.0,
}
# Assert predictions, loss, and metrics.
@@ -3471,8 +3478,10 @@ class RegressionHead(test.TestCase):
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
- self.assertItemsEqual(
- (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
+ self.assertItemsEqual((metric_keys.MetricKeys.LOSS_MEAN,
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN),
+ spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
@@ -3700,8 +3709,10 @@ class RegressionHead(test.TestCase):
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
- self.assertItemsEqual(
- (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
+ self.assertItemsEqual((metric_keys.MetricKeys.LOSS_MEAN,
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN),
+ spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
@@ -3832,7 +3843,13 @@ class RegressionHead(test.TestCase):
# losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
# loss = sum(losses) = 100+.1+1.5 = 101.6
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
- expected_metrics = {metric_keys.MetricKeys.LOSS_MEAN: 39.076923}
+ expected_metrics = {
+ metric_keys.MetricKeys.LOSS_MEAN:
+ 39.076923,
+ metric_keys.MetricKeys.PREDICTION_MEAN:
+ (45 + 41 * 0.1 + 44 * 1.5) / 2.6,
+ metric_keys.MetricKeys.LABEL_MEAN: (35 + 42 * 0.1 + 45 * 1.5) / 2.6,
+ }
# Assert spec contains expected tensors.
self.assertEqual(dtypes.float32, spec.loss.dtype)
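The weighted expectations in the last hunk follow directly from the definition of a weighted mean. A quick worked check, using the weights, predictions, and labels implied by the comments and expected values above:

    # Worked check of the weighted expectations above (values read off the test).
    weights = [1.0, 0.1, 1.5]
    predictions = [45.0, 41.0, 44.0]
    labels = [35.0, 42.0, 45.0]
    weight_sum = sum(weights)  # 2.6
    loss_mean = sum(w * (l - p) ** 2
                    for w, p, l in zip(weights, predictions, labels)) / weight_sum
    prediction_mean = sum(w * p for w, p in zip(weights, predictions)) / weight_sum
    label_mean = sum(w * l for w, l in zip(weights, labels)) / weight_sum
    # loss_mean       == 101.6 / 2.6 = 39.076923...
    # prediction_mean == (45 + 41 * 0.1 + 44 * 1.5) / 2.6
    # label_mean      == (35 + 42 * 0.1 + 45 * 1.5) / 2.6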
diff --git a/tensorflow/python/estimator/canned/linear_testing_utils.py b/tensorflow/python/estimator/canned/linear_testing_utils.py
index 9e9c2f7c4b..c3934c7a80 100644
--- a/tensorflow/python/estimator/canned/linear_testing_utils.py
+++ b/tensorflow/python/estimator/canned/linear_testing_utils.py
@@ -261,6 +261,8 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -286,6 +288,8 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -316,6 +320,8 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -346,7 +352,9 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
@@ -383,7 +391,9 @@ class BaseLinearRegressorEvaluationTest(object):
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
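Since the logits worked out in the comment above equal the labels exactly, the two new mean metrics coincide in this test. The assertions that follow in the full file are not shown in this diff, so the check below is only an inferred consequence:

    # Inferred from the comment above: predictions equal labels, so loss is 0
    # and the two new mean metrics agree.
    predictions = labels = [213.0, 421.0]
    assert sum((p - l) ** 2 for p, l in zip(predictions, labels)) == 0.0  # loss
    prediction_mean = sum(predictions) / len(predictions)  # 317.0
    label_mean = sum(labels) / len(labels)                 # 317.0
    assert prediction_mean == label_mean == 317.0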