path: root/tensorflow/contrib/saved_model
author     Katherine Wu <kathywu@google.com>                2018-08-28 20:50:04 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-08-28 20:53:28 -0700
commit     30a6b56176a9738bbe0a40b24f885503f112ae9f (patch)
tree       dd1ac446a007a9016c64b6de4862541928aa20ea /tensorflow/contrib/saved_model
parent     8012cf52d8c7e23766ff2a3d89a3028241de50b9 (diff)
Automated rollback of commit 069f808e5c0462819bcd6c73c75491b00cdd42c2
PiperOrigin-RevId: 210656847
Diffstat (limited to 'tensorflow/contrib/saved_model')
-rw-r--r--  tensorflow/contrib/saved_model/BUILD                                          |  17
-rw-r--r--  tensorflow/contrib/saved_model/__init__.py                                    |   7
-rw-r--r--  tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py        | 260
-rw-r--r--  tensorflow/contrib/saved_model/python/saved_model/keras_saved_model_test.py   | 293
4 files changed, 64 insertions, 513 deletions
diff --git a/tensorflow/contrib/saved_model/BUILD b/tensorflow/contrib/saved_model/BUILD
index b897224c6d..e7eb4ac563 100644
--- a/tensorflow/contrib/saved_model/BUILD
+++ b/tensorflow/contrib/saved_model/BUILD
@@ -36,7 +36,6 @@ py_library(
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
- ":keras_saved_model",
"//tensorflow/core:protos_all_py",
"//tensorflow/python:framework_ops",
"//tensorflow/python:lib",
@@ -102,33 +101,23 @@ py_library(
tags = ["no_windows"],
visibility = ["//visibility:public"],
deps = [
- "//tensorflow/python:array_ops",
- "//tensorflow/python:framework_ops",
"//tensorflow/python:lib",
- "//tensorflow/python:metrics",
- "//tensorflow/python:platform",
- "//tensorflow/python:saver",
"//tensorflow/python:util",
- "//tensorflow/python/estimator",
- "//tensorflow/python/estimator:export",
- "//tensorflow/python/estimator:keras",
- "//tensorflow/python/estimator:model_fn",
"//tensorflow/python/keras:engine",
- "//tensorflow/python/saved_model",
+ "//tensorflow/python/saved_model:constants",
],
)
py_test(
name = "keras_saved_model_test",
- size = "medium",
+ size = "small",
srcs = ["python/saved_model/keras_saved_model_test.py"],
srcs_version = "PY2AND3",
deps = [
- ":keras_saved_model",
+ ":saved_model_py",
"//tensorflow/python:client_testlib",
"//tensorflow/python:training",
"//tensorflow/python/keras",
"//third_party/py/numpy",
- "@absl_py//absl/testing:parameterized",
],
)
diff --git a/tensorflow/contrib/saved_model/__init__.py b/tensorflow/contrib/saved_model/__init__.py
index 074dc655ac..95e1a8967b 100644
--- a/tensorflow/contrib/saved_model/__init__.py
+++ b/tensorflow/contrib/saved_model/__init__.py
@@ -26,13 +26,10 @@ from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.saved_model.python.saved_model.keras_saved_model import *
from tensorflow.contrib.saved_model.python.saved_model.signature_def_utils import *
-# pylint: enable=unused-import,wildcard-import,line-too-long
+# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
-_allowed_symbols = [
- "get_signature_def_by_key",
- "load_keras_model",
- "save_keras_model"]
+_allowed_symbols = ["get_signature_def_by_key", "load_model", "save_model"]
remove_undocumented(__name__, _allowed_symbols)
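After this change, the wildcard import plus the `_allowed_symbols` list above leaves `tf.contrib.saved_model` exposing `get_signature_def_by_key`, `load_model`, and `save_model`; the `*_keras_model` names are gone. A minimal sketch of the resulting public surface, assuming the TF 1.x contrib API of this era and nothing beyond the names listed above:

import tensorflow as tf

# Endpoints restored by this rollback (names taken from _allowed_symbols):
save_fn = tf.contrib.saved_model.save_model
load_fn = tf.contrib.saved_model.load_model
# Unchanged export from signature_def_utils:
get_sig_def = tf.contrib.saved_model.get_signature_def_by_key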
diff --git a/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py b/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py
index 2c5c8c4afd..e2a969f053 100644
--- a/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py
+++ b/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py
@@ -20,69 +20,28 @@ from __future__ import print_function
import os
-from tensorflow.python.client import session
-from tensorflow.python.estimator import keras as estimator_keras_util
-from tensorflow.python.estimator import model_fn as model_fn_lib
-from tensorflow.python.estimator.export import export as export_helpers
-from tensorflow.python.framework import errors
-from tensorflow.python.framework import ops
-from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import models as models_lib
-from tensorflow.python.keras import optimizers
from tensorflow.python.keras.models import model_from_json
from tensorflow.python.lib.io import file_io
-from tensorflow.python.ops import variables
-from tensorflow.python.platform import gfile
-from tensorflow.python.platform import tf_logging as logging
-from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
-from tensorflow.python.saved_model import utils_impl as saved_model_utils
-from tensorflow.python.training import saver as saver_lib
-from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import compat
-def save_keras_model(
- model, saved_model_path, custom_objects=None, as_text=None):
+def save_model(model, saved_model_path):
"""Save a `tf.keras.Model` into Tensorflow SavedModel format.
- `save_model` generates new files/folders under the `saved_model_path` folder:
+ `save_model` generates the following files/folders under the `saved_model_path` folder:
1) an asset folder containing the json string of the model's
- configuration (topology).
+ configuration (topology).
2) a checkpoint containing the model weights.
- 3) a saved_model.pb file containing the model's MetaGraphs. The prediction
- graph is always exported. The evaluaton and training graphs are exported
- if the following conditions are met:
- - Evaluation: model loss is defined.
- - Training: model is compiled with an optimizer defined under `tf.train`.
- This is because `tf.keras.optimizers.Optimizer` instances cannot be
- saved to checkpoints.
- Model Requirements:
- - Model must be a sequential model or functional model. Subclassed models can
- not be saved via this function, unless you provide an implementation for
- get_config() and from_config().
- - All variables must be saveable by the model. In general, this condition is
- met through the use of layers defined in the keras library. However,
- there is currently a bug with variables created in Lambda layer functions
- not being saved correctly (see
- https://github.com/keras-team/keras/issues/9740).
-
- Note that each mode is exported in separate graphs, so different modes do not
- share variables. To use the train graph with evaluation or prediction graphs,
- create a new checkpoint if variable values have been updated.
+ Note that subclassed models can not be saved via this function, unless you
+ provide an implementation for get_config() and from_config().
+ Also note that `tf.keras.optimizers.Optimizer` instances can not currently be
+ saved to checkpoints. Use optimizers from `tf.train`.
Args:
model: A `tf.keras.Model` to be saved.
saved_model_path: a string specifying the path to the SavedModel directory.
- The SavedModel will be saved to a timestamped folder created within this
- directory.
- custom_objects: Optional dictionary mapping string names to custom classes
- or functions (e.g. custom loss functions).
- as_text: whether to write the `SavedModel` proto in text format.
-
- Returns:
- String path to the SavedModel folder, a subdirectory of `saved_model_path`.
Raises:
NotImplementedError: If the passed in model is a subclassed model.
@@ -90,200 +49,35 @@ def save_keras_model(
if not model._is_graph_network:
raise NotImplementedError
- export_dir = export_helpers.get_timestamped_export_dir(saved_model_path)
- temp_export_dir = export_helpers.get_temp_export_dir(export_dir)
-
- builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
-
- # Manually save variables to export them in an object-based checkpoint. This
- # skips the `builder.add_meta_graph_and_variables()` step, which saves a
- # named-based checkpoint.
- # TODO(b/113134168): Add fn to Builder to save with object-based saver.
- # TODO(b/113178242): This should only export the model json structure. Only
- # one save is needed once the weights can be copied from the model to clone.
- checkpoint_path = _export_model_json_and_variables(model, temp_export_dir)
-
- # Export each mode. Use ModeKeys enums defined for `Estimator` to ensure that
- # Keras models and `Estimator`s are exported with the same format.
- # Every time a mode is exported, the code checks to see if new variables have
- # been created (e.g. optimizer slot variables). If that is the case, the
- # checkpoint is re-saved to include the new variables.
- export_args = {'builder': builder,
- 'model': model,
- 'custom_objects': custom_objects,
- 'checkpoint_path': checkpoint_path}
-
- has_saved_vars = False
- if model.optimizer:
- if isinstance(model.optimizer, optimizers.TFOptimizer):
- _export_mode(model_fn_lib.ModeKeys.TRAIN, has_saved_vars, **export_args)
- has_saved_vars = True
- _export_mode(model_fn_lib.ModeKeys.EVAL, has_saved_vars, **export_args)
- else:
- logging.warning(
- 'Model was compiled with an optimizer, but the optimizer is not from '
- '`tf.train` (e.g. `tf.train.AdagradOptimizer`). Only the serving '
- 'graph was exported. The train and evaluate graphs were not added to '
- 'the SavedModel.')
- _export_mode(model_fn_lib.ModeKeys.PREDICT, has_saved_vars, **export_args)
-
- builder.save(as_text)
-
- gfile.Rename(temp_export_dir, export_dir)
- return export_dir
+ # save model configuration as a json string under assets folder.
+ model_json = model.to_json()
+ assets_destination_dir = os.path.join(
+ compat.as_bytes(saved_model_path),
+ compat.as_bytes(constants.ASSETS_DIRECTORY))
+ if not file_io.file_exists(assets_destination_dir):
+ file_io.recursive_create_dir(assets_destination_dir)
-def _export_model_json_and_variables(model, saved_model_path):
- """Save model variables and json structure into SavedModel subdirectories."""
- # Save model configuration as a json string under assets folder.
- model_json = model.to_json()
model_json_filepath = os.path.join(
- saved_model_utils.get_or_create_assets_dir(saved_model_path),
- compat.as_text(constants.SAVED_MODEL_FILENAME_JSON))
+ compat.as_bytes(assets_destination_dir),
+ compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
file_io.write_string_to_file(model_json_filepath, model_json)
- # Save model weights in checkpoint format under variables folder.
- saved_model_utils.get_or_create_variables_dir(saved_model_path)
- checkpoint_prefix = saved_model_utils.get_variables_path(saved_model_path)
- model.save_weights(checkpoint_prefix, save_format='tf', overwrite=True)
- return checkpoint_prefix
-
-
-def _get_var_list(model):
- """Return list of all checkpointed saveable objects in the model."""
- return checkpointable_utils.named_saveables(model)
-
-
-def _export_mode(
- mode, has_saved_vars, builder, model, custom_objects, checkpoint_path):
- """Export a model, and optionally save new vars from the clone model.
-
- Args:
- mode: A `tf.estimator.ModeKeys` string.
- has_saved_vars: A `boolean` indicating whether the SavedModel has already
- exported variables.
- builder: A `SavedModelBuilder` object.
- model: A `tf.keras.Model` object.
- custom_objects: A dictionary mapping string names to custom classes
- or functions.
- checkpoint_path: String path to checkpoint.
-
- Raises:
- ValueError: If the train/eval mode is being exported, but the model does
- not have an optimizer.
- """
- compile_clone = (mode != model_fn_lib.ModeKeys.PREDICT)
- if compile_clone and not model.optimizer:
- raise ValueError(
- 'Model does not have an optimizer. Cannot export mode %s' % mode)
-
- model_graph = ops.get_default_graph()
- with ops.Graph().as_default() as g:
-
- K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
-
- # Clone the model into blank graph. This will create placeholders for inputs
- # and targets.
- clone = models_lib.clone_and_build_model(
- model, custom_objects=custom_objects, compile_clone=compile_clone)
-
- # Make sure that iterations variable is added to the global step collection,
- # to ensure that, when the SavedModel graph is loaded, the iterations
- # variable is returned by `tf.train.get_global_step()`. This is required for
- # compatibility with the SavedModelEstimator.
- if compile_clone:
- g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)
-
- # Extract update and train ops from train/test/predict functions.
- if mode == model_fn_lib.ModeKeys.TRAIN:
- clone._make_train_function()
- builder._add_train_op(clone.train_function.updates_op)
- elif mode == model_fn_lib.ModeKeys.EVAL:
- clone._make_test_function()
- else:
- clone._make_predict_function()
- g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)
-
- clone_var_list = checkpointable_utils.named_saveables(clone)
-
- with session.Session().as_default():
- if has_saved_vars:
- # Confirm all variables in the clone have an entry in the checkpoint.
- status = clone.load_weights(checkpoint_path)
- status.assert_existing_objects_matched()
- else:
- # Confirm that variables between the clone and model match up exactly,
- # not counting optimizer objects. Optimizer objects are ignored because
- # if the model has not trained, the slot variables will not have been
- # created yet.
- # TODO(b/113179535): Replace with checkpointable equivalence.
- _assert_same_non_optimizer_objects(model, model_graph, clone, g)
-
- # TODO(b/113178242): Use value transfer for checkpointable objects.
- clone.load_weights(checkpoint_path)
-
- # Add graph and variables to SavedModel.
- # TODO(b/113134168): Switch to add_meta_graph_and_variables.
- clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)
- builder._has_saved_variables = True
-
- # Add graph to the SavedModel builder.
- builder.add_meta_graph(
- model_fn_lib.EXPORT_TAG_MAP[mode],
- signature_def_map=_create_signature_def_map(clone, mode),
- saver=saver_lib.Saver(clone_var_list),
- main_op=variables.local_variables_initializer())
- return None
-
-
-def _create_signature_def_map(model, mode):
- """Create a SignatureDef map from a Keras model."""
- inputs_dict = {name: x for name, x in zip(model.input_names, model.inputs)}
- if model.optimizer:
- targets_dict = {x.name.split(':')[0]: x
- for x in model.targets if x is not None}
- inputs_dict.update(targets_dict)
- outputs_dict = {name: x
- for name, x in zip(model.output_names, model.outputs)}
- export_outputs = model_fn_lib.export_outputs_for_mode(
- mode,
- predictions=outputs_dict,
- loss=model.total_loss if model.optimizer else None,
- metrics=estimator_keras_util._convert_keras_metrics_to_estimator(model))
- return export_helpers.build_all_signature_defs(
- inputs_dict,
- export_outputs=export_outputs,
- serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))
-
-
-def _assert_same_non_optimizer_objects(model, model_graph, clone, clone_graph):
- """Assert model and clone contain the same checkpointable objects."""
-
- def get_non_optimizer_objects(m, g):
- """Gather set of model and optimizer checkpointable objects."""
- # Set default graph because optimizer.variables() returns optimizer
- # variables defined in the default graph.
- with g.as_default():
- all_objects = set(checkpointable_utils.list_objects(m))
- optimizer_and_variables = set()
- for obj in all_objects:
- if isinstance(obj, optimizers.TFOptimizer):
- optimizer_and_variables.update(checkpointable_utils.list_objects(obj))
- optimizer_and_variables.update(set(obj.optimizer.variables()))
- return all_objects - optimizer_and_variables
+ # save model weights in checkpoint format.
+ checkpoint_destination_dir = os.path.join(
+ compat.as_bytes(saved_model_path),
+ compat.as_bytes(constants.VARIABLES_DIRECTORY))
- model_objects = get_non_optimizer_objects(model, model_graph)
- clone_objects = get_non_optimizer_objects(clone, clone_graph)
+ if not file_io.file_exists(checkpoint_destination_dir):
+ file_io.recursive_create_dir(checkpoint_destination_dir)
- if len(model_objects) != len(clone_objects):
- raise errors.InternalError(
- None, None,
- 'Model and clone must use the same variables.'
- '\n\tModel variables: %s\n\t Clone variables: %s'
- % (model_objects, clone_objects))
+ checkpoint_prefix = os.path.join(
+ compat.as_text(checkpoint_destination_dir),
+ compat.as_text(constants.VARIABLES_FILENAME))
+ model.save_weights(checkpoint_prefix, save_format='tf', overwrite=True)
-def load_keras_model(saved_model_path):
+def load_model(saved_model_path):
"""Load a keras.Model from SavedModel.
load_model reinstantiates model state by:
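Taken together, the rolled-back `save_model` writes only the model's JSON configuration under the SavedModel `assets/` directory and a TF-format weights checkpoint under `variables/`, and `load_model` rebuilds the model from those two pieces. A minimal round-trip sketch, assuming the contrib API shown in this diff; the directory path and layer sizes are illustrative:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.saved_model.python.saved_model import keras_saved_model

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(2, input_shape=(3,)),
    tf.keras.layers.Dense(3),
])
# Per the docstring above, use an optimizer from tf.train, not tf.keras.optimizers.
model.compile(loss='mse', optimizer=tf.train.RMSPropOptimizer(0.1))
model.train_on_batch(np.random.random((1, 3)), np.random.random((1, 3)))

keras_saved_model.save_model(model, '/tmp/contrib_saved_model')   # writes assets/ and variables/
restored = keras_saved_model.load_model('/tmp/contrib_saved_model')
print(restored.predict(np.random.random((1, 3))))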
diff --git a/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model_test.py b/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model_test.py
index 8a0dbef788..107ae1b07b 100644
--- a/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model_test.py
+++ b/tensorflow/contrib/saved_model/python/saved_model/keras_saved_model_test.py
@@ -20,35 +20,18 @@ from __future__ import print_function
import os
import shutil
-
-from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.saved_model.python.saved_model import keras_saved_model
from tensorflow.python import keras
-from tensorflow.python.client import session
-from tensorflow.python.eager import context
-from tensorflow.python.estimator import model_fn as model_fn_lib
-from tensorflow.python.framework import errors
-from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
-from tensorflow.python.keras.utils import tf_utils
-from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-from tensorflow.python.saved_model import constants
-from tensorflow.python.saved_model import loader_impl
-from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import training as training_module
class TestModelSavingandLoading(test.TestCase):
- def _save_model_dir(self, dirname='saved_model'):
- temp_dir = self.get_temp_dir()
- self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
- return os.path.join(temp_dir, dirname)
-
def test_saving_sequential_model(self):
with self.test_session():
model = keras.models.Sequential()
@@ -65,11 +48,13 @@ class TestModelSavingandLoading(test.TestCase):
model.train_on_batch(x, y)
ref_y = model.predict(x)
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
- temp_saved_model = self._save_model_dir()
- output_path = keras_saved_model.save_keras_model(model, temp_saved_model)
+ temp_saved_model = os.path.join(temp_dir, 'saved_model')
+ keras_saved_model.save_model(model, temp_saved_model)
- loaded_model = keras_saved_model.load_keras_model(output_path)
+ loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@@ -84,9 +69,12 @@ class TestModelSavingandLoading(test.TestCase):
x = np.random.random((1, 3))
ref_y = model.predict(x)
- temp_saved_model = self._save_model_dir()
- output_path = keras_saved_model.save_keras_model(model, temp_saved_model)
- loaded_model = keras_saved_model.load_keras_model(output_path)
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
+
+ temp_saved_model = os.path.join(temp_dir, 'saved_model')
+ keras_saved_model.save_model(model, temp_saved_model)
+ loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@@ -107,10 +95,12 @@ class TestModelSavingandLoading(test.TestCase):
model.train_on_batch(x, y)
ref_y = model.predict(x)
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
- temp_saved_model = self._save_model_dir()
- output_path = keras_saved_model.save_keras_model(model, temp_saved_model)
- loaded_model = keras_saved_model.load_keras_model(output_path)
+ temp_saved_model = os.path.join(temp_dir, 'saved_model')
+ keras_saved_model.save_model(model, temp_saved_model)
+ loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@@ -128,10 +118,12 @@ class TestModelSavingandLoading(test.TestCase):
y = np.random.random((1, 3))
ref_y = model.predict(x)
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
- temp_saved_model = self._save_model_dir()
- output_path = keras_saved_model.save_keras_model(model, temp_saved_model)
- loaded_model = keras_saved_model.load_keras_model(output_path)
+ temp_saved_model = os.path.join(temp_dir, 'saved_model')
+ keras_saved_model.save_model(model, temp_saved_model)
+ loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@@ -150,13 +142,14 @@ class TestModelSavingandLoading(test.TestCase):
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
- model.train_on_batch(x, y)
ref_y = model.predict(x)
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
- temp_saved_model = self._save_model_dir()
- output_path = keras_saved_model.save_keras_model(model, temp_saved_model)
- loaded_model = keras_saved_model.load_keras_model(output_path)
+ temp_saved_model = os.path.join(temp_dir, 'saved_model')
+ keras_saved_model.save_model(model, temp_saved_model)
+ loaded_model = keras_saved_model.load_model(temp_saved_model)
loaded_model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
@@ -177,10 +170,8 @@ class TestModelSavingandLoading(test.TestCase):
self.assertAllClose(ref_y, y, atol=1e-05)
# test saving/loading again
- temp_saved_model2 = self._save_model_dir('saved_model_2')
- output_path2 = keras_saved_model.save_keras_model(
- loaded_model, temp_saved_model2)
- loaded_model = keras_saved_model.load_keras_model(output_path2)
+ keras_saved_model.save_model(loaded_model, temp_saved_model)
+ loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@@ -199,231 +190,11 @@ class TestModelSavingandLoading(test.TestCase):
return self.layer2(self.layer1(inp))
model = SubclassedModel()
-
- temp_saved_model = self._save_model_dir()
- with self.assertRaises(NotImplementedError):
- keras_saved_model.save_keras_model(model, temp_saved_model)
-
-
-class LayerWithLearningPhase(keras.engine.base_layer.Layer):
-
- def call(self, x):
- phase = keras.backend.learning_phase()
- output = tf_utils.smart_cond(
- phase, lambda: x * 0, lambda: array_ops.identity(x))
- if not context.executing_eagerly():
- output._uses_learning_phase = True # pylint: disable=protected-access
- return output
-
- def compute_output_shape(self, input_shape):
- return input_shape
-
-
-def functional_model(uses_learning_phase):
- inputs = keras.layers.Input(shape=(3,))
- x = keras.layers.Dense(2)(inputs)
- x = keras.layers.Dense(3)(x)
- if uses_learning_phase:
- x = LayerWithLearningPhase()(x)
- return keras.models.Model(inputs, x)
-
-
-def sequential_model(uses_learning_phase):
- model = keras.models.Sequential()
- model.add(keras.layers.Dense(2, input_shape=(3,)))
- model.add(keras.layers.Dense(3))
- if uses_learning_phase:
- model.add(LayerWithLearningPhase())
- return model
-
-
-def load_model(sess, path, mode):
- tags = model_fn_lib.EXPORT_TAG_MAP[mode]
- sig_def_key = (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
- if mode == model_fn_lib.ModeKeys.PREDICT else mode)
- meta_graph_def = loader_impl.load(sess, tags, path)
- inputs = {
- k: sess.graph.get_tensor_by_name(v.name)
- for k, v in meta_graph_def.signature_def[sig_def_key].inputs.items()}
- outputs = {
- k: sess.graph.get_tensor_by_name(v.name)
- for k, v in meta_graph_def.signature_def[sig_def_key].outputs.items()}
- return inputs, outputs
-
-
-@test_util.run_all_in_graph_and_eager_modes
-class TestModelSavedModelExport(test.TestCase, parameterized.TestCase):
-
- def _save_model_dir(self, dirname='saved_model'):
temp_dir = self.get_temp_dir()
- self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
- return os.path.join(temp_dir, dirname)
-
- @parameterized.parameters(
- (functional_model, True, training_module.AdadeltaOptimizer(), True),
- (functional_model, True, training_module.AdadeltaOptimizer(), False),
- (functional_model, False, None, False),
- (sequential_model, True, training_module.AdadeltaOptimizer(), True),
- (sequential_model, True, training_module.AdadeltaOptimizer(), False),
- (sequential_model, False, None, False))
- def testSaveAndLoadSavedModelExport(
- self, model_builder, uses_learning_phase, optimizer, train_before_export):
- saved_model_path = self._save_model_dir()
- with self.test_session(graph=ops.Graph()):
- input_arr = np.random.random((1, 3))
- target_arr = np.random.random((1, 3))
-
- model = model_builder(uses_learning_phase)
- if optimizer is not None:
- model.compile(
- loss='mse',
- optimizer=optimizer,
- metrics=['mae'])
- if train_before_export:
- model.train_on_batch(input_arr, target_arr)
-
- ref_loss, ref_mae = model.evaluate(input_arr, target_arr)
-
- ref_predict = model.predict(input_arr)
-
- # Export SavedModel
- output_path = keras_saved_model.save_keras_model(model, saved_model_path)
-
- input_name = model.input_names[0]
- output_name = model.output_names[0]
- target_name = output_name + '_target'
-
- # Load predict graph, and test predictions
- with session.Session(graph=ops.Graph()) as sess:
- inputs, outputs = load_model(sess, output_path,
- model_fn_lib.ModeKeys.PREDICT)
-
- predictions = sess.run(outputs[output_name],
- {inputs[input_name]: input_arr})
- self.assertAllClose(ref_predict, predictions, atol=1e-05)
-
- if optimizer:
- # Load eval graph, and test predictions, loss and metric values
- with session.Session(graph=ops.Graph()) as sess:
- inputs, outputs = load_model(sess, output_path,
- model_fn_lib.ModeKeys.EVAL)
-
- eval_results = sess.run(outputs, {inputs[input_name]: input_arr,
- inputs[target_name]: target_arr})
-
- self.assertEqual(int(train_before_export),
- sess.run(training_module.get_global_step()))
- self.assertAllClose(ref_loss, eval_results['loss'], atol=1e-05)
- self.assertAllClose(
- ref_mae, eval_results['metrics/mae/update_op'], atol=1e-05)
- self.assertAllClose(
- ref_predict, eval_results['predictions/' + output_name], atol=1e-05)
-
- # Load train graph, and check for the train op, and prediction values
- with session.Session(graph=ops.Graph()) as sess:
- inputs, outputs = load_model(sess, output_path,
- model_fn_lib.ModeKeys.TRAIN)
- self.assertEqual(int(train_before_export),
- sess.run(training_module.get_global_step()))
- self.assertIn('loss', outputs)
- self.assertIn('metrics/mae/update_op', outputs)
- self.assertIn('metrics/mae/value', outputs)
- self.assertIn('predictions/' + output_name, outputs)
-
- # Train for a step
- train_op = ops.get_collection(constants.TRAIN_OP_KEY)
- train_outputs, _ = sess.run(
- [outputs, train_op], {inputs[input_name]: input_arr,
- inputs[target_name]: target_arr})
- self.assertEqual(int(train_before_export) + 1,
- sess.run(training_module.get_global_step()))
-
- if uses_learning_phase:
- self.assertAllClose(
- [[0, 0, 0]], train_outputs['predictions/' + output_name],
- atol=1e-05)
- else:
- self.assertNotAllClose(
- [[0, 0, 0]], train_outputs['predictions/' + output_name],
- atol=1e-05)
-
- def testSaveAndLoadSavedModelWithCustomObject(self):
- saved_model_path = self._save_model_dir()
- with session.Session(graph=ops.Graph()) as sess:
- def relu6(x):
- return keras.backend.relu(x, max_value=6)
- inputs = keras.layers.Input(shape=(1,))
- outputs = keras.layers.Activation(relu6)(inputs)
- model = keras.models.Model(inputs, outputs)
- output_path = keras_saved_model.save_keras_model(
- model, saved_model_path, custom_objects={'relu6': relu6})
- with session.Session(graph=ops.Graph()) as sess:
- inputs, outputs = load_model(sess, output_path,
- model_fn_lib.ModeKeys.PREDICT)
- input_name = model.input_names[0]
- output_name = model.output_names[0]
- predictions = sess.run(
- outputs[output_name], {inputs[input_name]: [[7], [-3], [4]]})
- self.assertAllEqual([[6], [0], [4]], predictions)
-
- def testAssertModelCloneSameObjectsIgnoreOptimizer(self):
- input_arr = np.random.random((1, 3))
- target_arr = np.random.random((1, 3))
-
- model_graph = ops.Graph()
- clone_graph = ops.Graph()
-
- # Create two models with the same layers but different optimizers.
- with session.Session(graph=model_graph):
- inputs = keras.layers.Input(shape=(3,))
- x = keras.layers.Dense(2)(inputs)
- x = keras.layers.Dense(3)(x)
- model = keras.models.Model(inputs, x)
-
- model.compile(loss='mse', optimizer=training_module.AdadeltaOptimizer())
- model.train_on_batch(input_arr, target_arr)
-
- with session.Session(graph=clone_graph):
- inputs = keras.layers.Input(shape=(3,))
- x = keras.layers.Dense(2)(inputs)
- x = keras.layers.Dense(3)(x)
- clone = keras.models.Model(inputs, x)
- clone.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=0.0001))
- clone.train_on_batch(input_arr, target_arr)
-
- keras_saved_model._assert_same_non_optimizer_objects(
- model, model_graph, clone, clone_graph)
-
- def testAssertModelCloneSameObjectsThrowError(self):
- input_arr = np.random.random((1, 3))
- target_arr = np.random.random((1, 3))
-
- model_graph = ops.Graph()
- clone_graph = ops.Graph()
-
- # Create two models with the same layers but different optimizers.
- with session.Session(graph=model_graph):
- inputs = keras.layers.Input(shape=(3,))
- x = keras.layers.Dense(2)(inputs)
- x = keras.layers.Dense(3)(x)
- model = keras.models.Model(inputs, x)
-
- model.compile(loss='mse', optimizer=training_module.AdadeltaOptimizer())
- model.train_on_batch(input_arr, target_arr)
-
- with session.Session(graph=clone_graph):
- inputs = keras.layers.Input(shape=(3,))
- x = keras.layers.Dense(2)(inputs)
- x = keras.layers.Dense(4)(x)
- x = keras.layers.Dense(3)(x)
- clone = keras.models.Model(inputs, x)
- clone.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=0.0001))
- clone.train_on_batch(input_arr, target_arr)
-
- with self.assertRaisesRegexp(
- errors.InternalError, 'Model and clone must use the same variables.'):
- keras_saved_model._assert_same_non_optimizer_objects(
- model, model_graph, clone, clone_graph)
+ self.addCleanup(shutil.rmtree, temp_dir)
+ temp_saved_model = os.path.join(temp_dir, 'saved_model')
+ with self.assertRaises(NotImplementedError):
+ keras_saved_model.save_model(model, temp_saved_model)
if __name__ == '__main__':
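The final test above exercises the subclassed-model restriction from the `save_model` docstring: a subclassed model is not a graph network, so the `_is_graph_network` check makes `save_model` raise `NotImplementedError`. A hedged sketch of that behavior, with an illustrative path:

import tensorflow as tf
from tensorflow.contrib.saved_model.python.saved_model import keras_saved_model

class SubclassedModel(tf.keras.Model):

  def __init__(self):
    super(SubclassedModel, self).__init__()
    self.layer1 = tf.keras.layers.Dense(3)

  def call(self, inp):
    return self.layer1(inp)

try:
  keras_saved_model.save_model(SubclassedModel(), '/tmp/subclassed_model')
except NotImplementedError:
  print('Subclassed models are not supported by this save path.')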