author     XFeiF <eva.aeolus@gmail.com>  2018-07-21 22:59:03 +0800
committer  XFeiF <eva.aeolus@gmail.com>  2018-07-21 22:59:03 +0800
commit  828257c82a6dfc1537547d226b25b7e394ff3cd4 (patch)
tree    1f2a935928838f6d7c88fd152814bef4f2eba08e /tensorflow/contrib/slim
parent  dae7a75734f2137aae7130e064fab9dfcb799c45 (diff)
[tf.contrib.slim] Update documentation in evaluation.py
Diffstat (limited to 'tensorflow/contrib/slim')
-rw-r--r--  tensorflow/contrib/slim/python/slim/evaluation.py  25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/tensorflow/contrib/slim/python/slim/evaluation.py b/tensorflow/contrib/slim/python/slim/evaluation.py
index 5cfd5ee82e..0feb3925eb 100644
--- a/tensorflow/contrib/slim/python/slim/evaluation.py
+++ b/tensorflow/contrib/slim/python/slim/evaluation.py
@@ -22,7 +22,8 @@ modules using a variety of metrics and summarizing the results.
**********************
In the simplest use case, we use a model to create the predictions, then specify
-the metrics and finally call the `evaluation` method:
+the metrics, choose one model checkpoint, and finally call the
+`evaluate_once` method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
@@ -34,20 +35,24 @@ the metrics and finally call the `evaluation` method:
"mse": slim.metrics.mean_squared_error(predictions, labels),
})
+ checkpoint_path = '/tmp/my_model_dir/my_checkpoint'
+ log_dir = '/tmp/my_model_eval/'
+
initial_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer())
- with tf.Session() as sess:
- metric_values = slim.evaluation(
- sess,
- num_evals=1,
- initial_op=initial_op,
- eval_op=names_to_updates.values(),
- final_op=name_to_values.values())
+ metric_values = slim.evaluate_once(
+ master='',
+ checkpoint_path=checkpoint_path,
+ logdir=log_dir,
+ num_evals=1,
+ initial_op=initial_op,
+ eval_op=names_to_updates.values(),
+ final_op=names_to_values.values())
- for metric, value in zip(names_to_values.keys(), metric_values):
- logging.info('Metric %s has value: %f', metric, value)
+ for metric, value in zip(names_to_values.keys(), metric_values):
+ logging.info('Metric %s has value: %f', metric, value)
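
For reference, the fragments above assemble into the following self-contained
sketch of the updated docstring example. The input pipeline and model
(load_data, my_model) are hypothetical stand-ins for the LoadData(...) and
model-construction steps elided in the docstring, and the checkpoint path is a
placeholder that must point at a real checkpoint; the slim calls themselves
(slim.metrics.aggregate_metric_map, slim.metrics.streaming_mean_squared_error,
slim.evaluate_once) follow the TF 1.x contrib API.

  import tensorflow as tf

  slim = tf.contrib.slim

  # Hypothetical input pipeline and model, standing in for the LoadData(...)
  # and model-construction steps elided in the docstring example.
  images, labels = load_data()
  predictions = my_model(images)

  # Streaming metrics return (value_op, update_op) pairs;
  # aggregate_metric_map splits them into two name-keyed dicts.
  names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
      'mse': slim.metrics.streaming_mean_squared_error(predictions, labels),
  })

  checkpoint_path = '/tmp/my_model_dir/my_checkpoint'  # placeholder path
  log_dir = '/tmp/my_model_eval/'

  initial_op = tf.group(
      tf.global_variables_initializer(),
      tf.local_variables_initializer())

  # evaluate_once restores the single checkpoint, runs eval_op num_evals
  # times, and returns the values produced by final_op.
  metric_values = slim.evaluate_once(
      master='',
      checkpoint_path=checkpoint_path,
      logdir=log_dir,
      num_evals=1,
      initial_op=initial_op,
      eval_op=list(names_to_updates.values()),
      final_op=list(names_to_values.values()))

  for metric, value in zip(names_to_values.keys(), metric_values):
    tf.logging.info('Metric %s has value: %f', metric, value)
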
************************************************
* Evaluating a Checkpointed Model with Metrics *