path: root/tensorflow/contrib/slim/python/slim/evaluation_test.py
Diffstat (limited to 'tensorflow/contrib/slim/python/slim/evaluation_test.py')
-rw-r--r--  tensorflow/contrib/slim/python/slim/evaluation_test.py  25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/tensorflow/contrib/slim/python/slim/evaluation_test.py b/tensorflow/contrib/slim/python/slim/evaluation_test.py
index 94fc12ca81..3d0308aaf3 100644
--- a/tensorflow/contrib/slim/python/slim/evaluation_test.py
+++ b/tensorflow/contrib/slim/python/slim/evaluation_test.py
@@ -26,7 +26,6 @@ import time
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
-from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.slim.python.slim import evaluation
from tensorflow.contrib.training.python.training import evaluation as evaluation_lib
from tensorflow.core.protobuf import saver_pb2
@@ -37,6 +36,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
@@ -89,8 +89,8 @@ class EvaluationTest(test.TestCase):
self._predictions, self._scale = TestModel(self._inputs)
def testFinalOpsOnEvaluationLoop(self):
- value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
- self._labels)
+ value_op, update_op = metrics.accuracy(
+ labels=self._labels, predictions=self._predictions)
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
# Create checkpoint and log directories:
@@ -136,9 +136,10 @@ class EvaluationTest(test.TestCase):
self.assertTrue(obj.hook_was_run)
def _create_names_to_metrics(self, predictions, labels):
- accuracy0, update_op0 = metric_ops.streaming_accuracy(predictions, labels)
- accuracy1, update_op1 = metric_ops.streaming_accuracy(predictions + 1,
- labels)
+ accuracy0, update_op0 = metrics.accuracy(
+ labels=labels, predictions=predictions)
+ accuracy1, update_op1 = metrics.accuracy(
+ labels=labels, predictions=predictions + 1)
names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
@@ -198,8 +199,8 @@ class EvaluationTest(test.TestCase):
predictions_limited = input.limit_epochs(self._predictions, num_epochs=1)
labels_limited = input.limit_epochs(self._labels, num_epochs=1)
- value_op, update_op = metric_ops.streaming_accuracy(
- predictions_limited, labels_limited)
+ value_op, update_op = metrics.accuracy(
+ labels=labels_limited, predictions=predictions_limited)
init_op = control_flow_ops.group(variables.global_variables_initializer(),
variables.local_variables_initializer())
@@ -260,8 +261,8 @@ class SingleEvaluationTest(test.TestCase):
self._prepareCheckpoint(checkpoint_path)
# Next, determine the metric to evaluate:
- value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
- self._labels)
+ value_op, update_op = metrics.accuracy(
+ labels=self._labels, predictions=self._predictions)
# Run the evaluation and verify the results:
accuracy_value = evaluation.evaluate_once(
@@ -276,8 +277,8 @@ class SingleEvaluationTest(test.TestCase):
self._prepareCheckpoint(checkpoint_path)
# Next, determine the metric to evaluate:
- value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
- self._labels)
+ value_op, update_op = metrics.accuracy(
+ labels=self._labels, predictions=self._predictions)
dumping_root = os.path.join(self.get_temp_dir(), 'tfdbg_dump_dir')
dumping_hook = hooks.DumpingDebugHook(dumping_root, log_usage=False)
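
Note: this change replaces the deprecated contrib op metric_ops.streaming_accuracy with the core op tf.metrics.accuracy (imported above as metrics from tensorflow.python.ops). The two functions take their tensors in opposite positional order, which is why the new calls pass keyword arguments. A minimal sketch of the equivalence, assuming TF 1.x graph mode and hypothetical predictions/labels tensors (the contrib import is shown only for comparison):

from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.ops import metrics

# Old (deprecated) contrib call: positional order is (predictions, labels).
value_op, update_op = metric_ops.streaming_accuracy(predictions, labels)

# Core call: signature is accuracy(labels, predictions, ...), so keyword
# arguments keep the operands explicit and avoid an accidental swap.
value_op, update_op = metrics.accuracy(labels=labels, predictions=predictions)

# Both return (value_op, update_op): update_op accumulates counts across
# session.run() calls, and value_op reads the running accuracy.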