Diffstat (limited to 'tensorflow/contrib/slim/python/slim/evaluation_test.py')
-rw-r--r--  tensorflow/contrib/slim/python/slim/evaluation_test.py | 46
1 file changed, 5 insertions(+), 41 deletions(-)
diff --git a/tensorflow/contrib/slim/python/slim/evaluation_test.py b/tensorflow/contrib/slim/python/slim/evaluation_test.py
index 870f504d10..d9e0f54b72 100644
--- a/tensorflow/contrib/slim/python/slim/evaluation_test.py
+++ b/tensorflow/contrib/slim/python/slim/evaluation_test.py
@@ -20,7 +20,6 @@ from __future__ import print_function
 
 import glob
 import os
-import shutil
 import time
 
 import numpy as np
@@ -30,8 +29,6 @@ from tensorflow.contrib.metrics.python.ops import metric_ops
 from tensorflow.contrib.slim.python.slim import evaluation
 from tensorflow.contrib.training.python.training import evaluation as evaluation_lib
 from tensorflow.core.protobuf import saver_pb2
-from tensorflow.python.debug.lib import debug_data
-from tensorflow.python.debug.wrappers import hooks
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors
@@ -233,7 +230,11 @@ class SingleEvaluationTest(test.TestCase):
     with self.assertRaises(errors.NotFoundError):
       evaluation.evaluate_once('', checkpoint_path, log_dir)
 
-  def _prepareCheckpoint(self, checkpoint_path):
+  def testRestoredModelPerformance(self):
+    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
+    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
+
+    # First, save out the current model to a checkpoint:
     init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                      variables.local_variables_initializer())
     saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
@@ -241,13 +242,6 @@
       sess.run(init_op)
       saver.save(sess, checkpoint_path)
 
-  def testRestoredModelPerformance(self):
-    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
-    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
-
-    # First, save out the current model to a checkpoint:
-    self._prepareCheckpoint(checkpoint_path)
-
     # Next, determine the metric to evaluate:
     value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                         self._labels)
@@ -257,36 +251,6 @@
         '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
     self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
 
-  def testAdditionalHooks(self):
-    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
-    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')
-
-    # First, save out the current model to a checkpoint:
-    self._prepareCheckpoint(checkpoint_path)
-
-    # Next, determine the metric to evaluate:
-    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
-                                                        self._labels)
-
-    dumping_root = os.path.join(self.get_temp_dir(), 'tfdbg_dump_dir')
-    dumping_hook = hooks.DumpingDebugHook(dumping_root, log_usage=False)
-    try:
-      # Run the evaluation and verify the results:
-      accuracy_value = evaluation.evaluate_once(
-          '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op,
-          hooks=[dumping_hook])
-      self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
-
-      dump = debug_data.DebugDumpDir(
-          glob.glob(os.path.join(dumping_root, 'run_*'))[0])
-      # Here we simply assert that the dumped data has been loaded and is
-      # non-empty. We do not care about the detailed model-internal tensors or
-      # their values.
-      self.assertTrue(dump.dumped_tensor_data)
-    finally:
-      if os.path.isdir(dumping_root):
-        shutil.rmtree(dumping_root)
-
 
 if __name__ == '__main__':
   test.main()
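
For reference, the evaluation flow covered by the surviving testRestoredModelPerformance, written out as a standalone script. This is a minimal sketch assuming a TF 1.x installation with tf.contrib available; the toy model (a single 'scale' variable driving constant predictions), the /tmp paths, and the hard-coded labels are illustrative assumptions, not part of this change.

# Standalone sketch of the flow exercised by testRestoredModelPerformance.
# Assumes TF 1.x with tf.contrib; paths and the toy model are hypothetical.
import tensorflow as tf

slim = tf.contrib.slim

checkpoint_path = '/tmp/model.ckpt'  # hypothetical location
log_dir = '/tmp/log_dir1/'           # hypothetical location

# Toy "model": one saveable variable scaling fixed logits into predictions.
scale = tf.Variable(1.0, name='scale')
logits = tf.constant([0.9, 0.1, 0.8, 0.6]) * scale
predictions = tf.cast(logits > 0.5, tf.int64)
labels = tf.constant([1, 1, 1, 0], dtype=tf.int64)

# First, save out the current model to a checkpoint:
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
with tf.Session() as sess:
  sess.run(init_op)
  saver.save(sess, checkpoint_path)

# Next, determine the metric to evaluate:
value_op, update_op = tf.contrib.metrics.streaming_accuracy(predictions,
                                                            labels)

# Run the evaluation once (restoring from the checkpoint) and read back the
# final metric value:
accuracy = slim.evaluation.evaluate_once(
    '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
print('accuracy: %f' % accuracy)  # 0.5 for the constants above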
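The deleted testAdditionalHooks exercised the hooks parameter of evaluate_once with a tfdbg DumpingDebugHook. Below is that usage extracted as a standalone sketch, reusing slim, checkpoint_path, log_dir, update_op, and value_op from the sketch above; the dump directory is again a hypothetical path.

# Sketch of the hook usage from the removed testAdditionalHooks.
import glob
import os
import shutil

from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import hooks

dumping_root = '/tmp/tfdbg_dump_dir'  # hypothetical dump location
dumping_hook = hooks.DumpingDebugHook(dumping_root, log_usage=False)
try:
  # Run the evaluation with the debug hook attached:
  accuracy = slim.evaluation.evaluate_once(
      '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op,
      hooks=[dumping_hook])

  # Each hooked run writes a run_* subdirectory of tensor dumps that
  # debug_data can load back for inspection:
  dump = debug_data.DebugDumpDir(
      glob.glob(os.path.join(dumping_root, 'run_*'))[0])
  assert dump.dumped_tensor_data  # dump loaded and non-empty
finally:
  if os.path.isdir(dumping_root):
    shutil.rmtree(dumping_root)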