Diffstat (limited to 'tensorflow/python/estimator/estimator_test.py')
-rw-r--r--  tensorflow/python/estimator/estimator_test.py  66
1 file changed, 0 insertions, 66 deletions
diff --git a/tensorflow/python/estimator/estimator_test.py b/tensorflow/python/estimator/estimator_test.py
index c1b773b8c4..2b9b44523b 100644
--- a/tensorflow/python/estimator/estimator_test.py
+++ b/tensorflow/python/estimator/estimator_test.py
@@ -50,7 +50,6 @@ from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
-from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
@@ -1911,71 +1910,6 @@ class EstimatorExportTest(test.TestCase):
     est.train(dummy_input_fn, steps=1)
     est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn)
-  def test_export_savedmodel_respects_soft_placement(self):
-    def model_fn_with_a_gpu_op_but_no_kernel(features, labels, mode):
-      _, _ = features, labels
-      table = saver_test_utils.CheckpointedOp(name='v2')
-
-      update_global_step = state_ops.assign_add(training.get_global_step(), 1)
-      with ops.control_dependencies([update_global_step]):
-        train_op = table.insert('k1', 30.0)
-
-      # In this test, there are no GPUs available. The goal is to verify that
-      # export_savedmodel executes nevertheless.
-      with ops.device('/gpu:0'):
-        string_op = string_ops.as_string(update_global_step)
-
-      with ops.control_dependencies([string_op]):
-        prediction = table.lookup('k1', 0.0)
-
-      return model_fn_lib.EstimatorSpec(
-          mode,
-          predictions=prediction,
-          loss=constant_op.constant(1.),
-          train_op=train_op,
-          export_outputs={
-              'test': export_output.PredictOutput({
-                  'prediction': prediction
-              })
-          })
-
-    tmpdir = tempfile.mkdtemp()
-    est = estimator.Estimator(
-        model_fn=model_fn_with_a_gpu_op_but_no_kernel)
-    est.train(input_fn=dummy_input_fn, steps=1)
-    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
-                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
-    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
-        feature_spec)
-    export_dir_base = os.path.join(
-        compat.as_bytes(tmpdir), compat.as_bytes('export'))
-
-    export_dir = est.export_savedmodel(
-        export_dir_base, serving_input_receiver_fn)
-
-    # At this point, if export_savedmodel executed with
-    # allow_soft_placement=True, then the GPU-assigned operation was silently
-    # placed on the CPU. Otherwise, an exception would have been raised
-    # related to the fact that the requested GPU device isn't available.
-
-    # Expectations below assume that export_savedmodel has completed normally.
-    self.assertTrue(gfile.Exists(export_dir_base))
-    self.assertTrue(gfile.Exists(export_dir))
-    self.assertTrue(gfile.Exists(os.path.join(
-        compat.as_bytes(export_dir),
-        compat.as_bytes('saved_model.pb'))))
-    self.assertTrue(gfile.Exists(os.path.join(
-        compat.as_bytes(export_dir),
-        compat.as_bytes('variables'))))
-    self.assertTrue(gfile.Exists(os.path.join(
-        compat.as_bytes(export_dir),
-        compat.as_bytes('variables/variables.index'))))
-    self.assertTrue(gfile.Exists(os.path.join(
-        compat.as_bytes(export_dir),
-        compat.as_bytes('variables/variables.data-00000-of-00001'))))
-
-    gfile.DeleteRecursively(tmpdir)
-
class EstimatorHookOrderingTest(test.TestCase):