author    Yong Tang <yong.tang.github@outlook.com>  2018-06-03 21:51:32 -0700
committer Gunhan Gulsoy <gunan@google.com>          2018-06-03 21:51:32 -0700
commit    fd9246a308e77c6c27d5bddcc6646525f3ce5e7b
tree      802568ea9247218a987e91a2eaa3b73004838355 /tensorflow/contrib/tensor_forest
parent    63dafb7f5dbef4da63e095595a49f5d5d7258af9
Switch from tf.contrib.metrics to tf.metrics (#18783)
* Switch from tf.contrib.metrics to tf.metrics

  Many of the functions in `tf.contrib.metrics` have been deprecated in
  favor of `tf.metrics`. This fix switches the calls from
  `tf.contrib.metrics` to `tf.metrics`.

  Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

* Change to `tf.metrics` in evaluation_test.py

  Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

* Fix pylint issue

  Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
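Beyond the module path, the main difference is argument order: the deprecated
streaming_* functions take (predictions, labels, ...), while the tf.metrics
replacements take (labels, predictions, ...). Both return a (value, update_op)
pair backed by local variables. A minimal TF 1.x sketch of the migration
pattern (the tensors here are illustrative placeholders, not values from the
patch):

    import tensorflow as tf

    preds = tf.constant([1, 0, 1, 1], dtype=tf.int64)
    labels = tf.constant([1, 0, 0, 1], dtype=tf.int64)

    # Before (deprecated): tf.contrib.metrics puts predictions first.
    #   acc, update_op = tf.contrib.metrics.streaming_accuracy(preds, labels)

    # After: tf.metrics puts labels first; keyword arguments keep the
    # swap explicit and avoid silently transposing the two.
    acc, update_op = tf.metrics.accuracy(labels=labels, predictions=preds)

    with tf.Session() as sess:
      # Metric accumulators live in local (not global) variables.
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(acc))  # 0.75 -- three of four predictions match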
Diffstat (limited to 'tensorflow/contrib/tensor_forest')
-rw-r--r--  tensorflow/contrib/tensor_forest/client/eval_metrics.py | 45 ++++++++++++++++++++++++---------------------
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/tensorflow/contrib/tensor_forest/client/eval_metrics.py b/tensorflow/contrib/tensor_forest/client/eval_metrics.py
index e893e1d1c8..d8236a0a6f 100644
--- a/tensorflow/contrib/tensor_forest/client/eval_metrics.py
+++ b/tensorflow/contrib/tensor_forest/client/eval_metrics.py
@@ -21,10 +21,10 @@ import numpy as np
 
 from tensorflow.contrib import losses
 from tensorflow.contrib.learn.python.learn.estimators import prediction_key
-from tensorflow.contrib.metrics.python.ops import metric_ops
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import metrics
 from tensorflow.python.ops import nn
 
 INFERENCE_PROB_NAME = prediction_key.PredictionKey.PROBABILITIES
 INFERENCE_PRED_NAME = prediction_key.PredictionKey.CLASSES
@@ -38,12 +38,13 @@ def _top_k_generator(k):
     targets = math_ops.to_int32(targets)
     if targets.get_shape().ndims > 1:
       targets = array_ops.squeeze(targets, axis=[1])
-    return metric_ops.streaming_mean(nn.in_top_k(probabilities, targets, k))
+    return metrics.mean(nn.in_top_k(probabilities, targets, k))
   return _top_k
 
 
 def _accuracy(predictions, targets, weights=None):
-  return metric_ops.streaming_accuracy(predictions, targets, weights=weights)
+  return metrics.accuracy(
+      labels=targets, predictions=predictions, weights=weights)
 
 
 def _r2(probabilities, targets, weights=None):
@@ -53,7 +54,7 @@ def _r2(probabilities, targets, weights=None):
   squares_residuals = math_ops.reduce_sum(
       math_ops.square(targets - probabilities), 0)
   score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
-  return metric_ops.streaming_mean(score, weights=weights)
+  return metrics.mean(score, weights=weights)
 
 
 def _squeeze_and_onehot(targets, depth):
@@ -62,7 +63,7 @@ def _squeeze_and_onehot(targets, depth):
 
 
 def _sigmoid_entropy(probabilities, targets, weights=None):
-  return metric_ops.streaming_mean(
+  return metrics.mean(
       losses.sigmoid_cross_entropy(probabilities,
                                    _squeeze_and_onehot(
                                        targets,
@@ -71,7 +72,7 @@ def _sigmoid_entropy(probabilities, targets, weights=None):
 
 
 def _softmax_entropy(probabilities, targets, weights=None):
-  return metric_ops.streaming_mean(
+  return metrics.mean(
       losses.sparse_softmax_cross_entropy(probabilities,
                                           math_ops.to_int32(targets)),
       weights=weights)
@@ -82,7 +83,7 @@ def _predictions(predictions, unused_targets, **unused_kwargs):
 
 
 def _class_log_loss(probabilities, targets, weights=None):
-  return metric_ops.streaming_mean(
+  return metrics.mean(
      losses.log_loss(probabilities,
                      _squeeze_and_onehot(targets,
                                          array_ops.shape(probabilities)[1])),
@@ -90,34 +91,36 @@ def _class_log_loss(probabilities, targets, weights=None):
 
 
 def _precision(predictions, targets, weights=None):
-  return metric_ops.streaming_precision(predictions, targets, weights=weights)
+  return metrics.precision(
+      labels=targets, predictions=predictions, weights=weights)
 
 
 def _precision_at_thresholds(predictions, targets, weights=None):
-  return metric_ops.streaming_precision_at_thresholds(
-      array_ops.slice(predictions, [0, 1], [-1, 1]),
-      targets,
-      np.arange(
-          0, 1, 0.01, dtype=np.float32),
+  return metrics.precision_at_thresholds(
+      labels=targets,
+      predictions=array_ops.slice(predictions, [0, 1], [-1, 1]),
+      thresholds=np.arange(0, 1, 0.01, dtype=np.float32),
       weights=weights)
 
 
 def _recall(predictions, targets, weights=None):
-  return metric_ops.streaming_recall(predictions, targets, weights=weights)
+  return metrics.recall(
+      labels=targets, predictions=predictions, weights=weights)
 
 
 def _recall_at_thresholds(predictions, targets, weights=None):
-  return metric_ops.streaming_recall_at_thresholds(
-      array_ops.slice(predictions, [0, 1], [-1, 1]),
-      targets,
-      np.arange(
-          0, 1, 0.01, dtype=np.float32),
+  return metrics.recall_at_thresholds(
+      labels=targets,
+      predictions=array_ops.slice(predictions, [0, 1], [-1, 1]),
+      thresholds=np.arange(0, 1, 0.01, dtype=np.float32),
       weights=weights)
 
 
 def _auc(probs, targets, weights=None):
-  return metric_ops.streaming_auc(array_ops.slice(probs, [0, 1], [-1, 1]),
-                                  targets, weights=weights)
+  return metrics.auc(
+      labels=targets,
+      predictions=array_ops.slice(probs, [0, 1], [-1, 1]),
+      weights=weights)
 
 
 _EVAL_METRICS = {