author    Allen Lavoie <allenl@google.com>                 2018-08-02 15:47:43 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-08-02 15:51:17 -0700
commit    1bf206bc82f600886f1e19c9860f09f18984346b (patch)
tree      fbd6ee10df16e491142017e96120181b81a72ec5 /tensorflow/contrib/training
parent    6fbbad97e293cc39bde32495e92614c69a9a7896 (diff)
Split checkpoint management utility functions out of saver.py
Pure refactor, in preparation for adding a higher-level checkpoint management utility. This utility will also need to work with the Checkpoint proto, and globbing it onto saver.py seems dirty.

PiperOrigin-RevId: 207179646
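For callers, the refactor amounts to an import swap. A minimal before/after sketch, where the module paths come from the diff below and the checkpoint directory is hypothetical:

    # Before: latest_checkpoint() was reached through saver.py.
    from tensorflow.python.training import saver as tf_saver
    ckpt = tf_saver.latest_checkpoint('/tmp/train_logs')

    # After: the same helper lives in the new checkpoint_management module.
    from tensorflow.python.training import checkpoint_management
    ckpt = checkpoint_management.latest_checkpoint('/tmp/train_logs')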
Diffstat (limited to 'tensorflow/contrib/training')
-rw-r--r--  tensorflow/contrib/training/python/training/evaluation.py    | 4
-rw-r--r--  tensorflow/contrib/training/python/training/training_test.py | 3
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/tensorflow/contrib/training/python/training/evaluation.py b/tensorflow/contrib/training/python/training/evaluation.py
index f7fd66d33f..01bac891da 100644
--- a/tensorflow/contrib/training/python/training/evaluation.py
+++ b/tensorflow/contrib/training/python/training/evaluation.py
@@ -142,9 +142,9 @@ from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
+from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
-from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
@@ -189,7 +189,7 @@ def wait_for_new_checkpoint(checkpoint_dir,
logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
- checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
+ checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
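The hunk above only swaps the module that wait_for_new_checkpoint() polls through; the polling contract itself is unchanged. A minimal usage sketch, assuming the signature suggested by the parameter names visible in the hunk (checkpoint_dir, last_checkpoint, seconds_to_sleep, timeout) and a hypothetical evaluate() callback:

    from tensorflow.contrib.training.python.training import evaluation

    last_seen = None
    while True:
        # Blocks until a checkpoint newer than last_seen appears, or until
        # the timeout elapses (in which case it returns None).
        last_seen = evaluation.wait_for_new_checkpoint(
            '/tmp/train_logs',         # hypothetical checkpoint_dir
            last_checkpoint=last_seen,
            seconds_to_sleep=1,
            timeout=600)
        if last_seen is None:
            break                      # no new checkpoint within timeout
        evaluate(last_seen)            # hypothetical per-checkpoint work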
diff --git a/tensorflow/contrib/training/python/training/training_test.py b/tensorflow/contrib/training/python/training/training_test.py
index 4877c010fa..94cf7788b2 100644
--- a/tensorflow/contrib/training/python/training/training_test.py
+++ b/tensorflow/contrib/training/python/training/training_test.py
@@ -36,6 +36,7 @@ from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
+from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
@@ -421,7 +422,7 @@ class TrainTest(test.TestCase):
train_op = self.create_train_op()
model_variables = variables_lib2.global_variables()
- model_path = saver_lib.latest_checkpoint(logdir1)
+ model_path = checkpoint_management.latest_checkpoint(logdir1)
assign_fn = variables_lib.assign_from_checkpoint_fn(
model_path, model_variables)
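The test exercises the standard restore pattern: resolve the newest checkpoint in a log directory, then build an assignment function for the model variables. A hedged sketch of that pattern outside the test harness, reusing the import aliases from the hunk (the log directory and session usage are illustrative):

    from tensorflow.contrib.framework.python.ops import variables as variables_lib
    from tensorflow.python.ops import variables as variables_lib2
    from tensorflow.python.training import checkpoint_management

    model_path = checkpoint_management.latest_checkpoint('/tmp/logdir1')
    assign_fn = variables_lib.assign_from_checkpoint_fn(
        model_path, variables_lib2.global_variables())
    # assign_fn is invoked later with a live session to load the weights,
    # e.g. assign_fn(sess) inside session-creation hooks.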