author    Rohan Jain <rohanj@google.com>    2018-09-26 22:00:22 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-09-26 22:03:37 -0700
commit    a40cfd42e20d7e4520c1306666c9dfee97eb0a2e (patch)
tree      380100ade305a7b1fe8e7baa7eed2197daf1eabb /tensorflow/python/estimator
parent    941e757a2364bb2e7cf41b8d980d7639849c6c5d (diff)
Automated rollback of commit e00d7744dbab5c73e4d8ffa8a7d361f7b2dcefff
PiperOrigin-RevId: 214721004
Diffstat (limited to 'tensorflow/python/estimator')
-rw-r--r--  tensorflow/python/estimator/BUILD                          |   2
-rw-r--r--  tensorflow/python/estimator/canned/dnn.py                  | 188
-rw-r--r--  tensorflow/python/estimator/canned/dnn_linear_combined.py  |   7
-rw-r--r--  tensorflow/python/estimator/canned/dnn_test.py             | 161
-rw-r--r--  tensorflow/python/estimator/canned/dnn_testing_utils.py    | 116
5 files changed, 346 insertions(+), 128 deletions(-)
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index 7f2349954d..ba1b7ec2b5 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -281,6 +281,7 @@ py_library(
":prediction_keys",
"//tensorflow:tensorflow_py_no_contrib",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
"@six_archive//:six",
],
)
@@ -303,6 +304,7 @@ py_test(
":pandas_io",
":prediction_keys",
"//tensorflow:tensorflow_py_no_contrib",
+ "@absl_py//absl/testing:parameterized",
"@six_archive//:six",
],
)
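The new dependency on @absl_py//absl/testing:parameterized is what lets dnn_test.py below run each integration test under both feature-column implementations. A minimal self-contained sketch of that pattern (the test class and assertion are illustrative, not from this commit):

from absl.testing import parameterized

from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.platform import test


@parameterized.parameters((feature_column,), (feature_column_v2,))
class NumericColumnSmokeTest(test.TestCase, parameterized.TestCase):

  def test_numeric_column(self, fc_impl):
    # Both implementations expose numeric_column with the same signature.
    column = fc_impl.numeric_column('x', shape=(2,))
    self.assertEqual('x', column.name)


if __name__ == '__main__':
  test.main()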
diff --git a/tensorflow/python/estimator/canned/dnn.py b/tensorflow/python/estimator/canned/dnn.py
index 1c0c4581c0..97971f9561 100644
--- a/tensorflow/python/estimator/canned/dnn.py
+++ b/tensorflow/python/estimator/canned/dnn.py
@@ -24,7 +24,10 @@ from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
-from tensorflow.python.feature_column import feature_column as feature_column_lib
+from tensorflow.python.feature_column import feature_column
+from tensorflow.python.feature_column import feature_column_v2
+from tensorflow.python.framework import ops
+from tensorflow.python.keras.engine import training
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization
from tensorflow.python.ops import init_ops
@@ -45,8 +48,14 @@ def _add_hidden_layer_summary(value, tag):
summary.histogram('%s/activation' % tag, value)
-def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
- dropout, input_layer_partitioner, batch_norm):
+def _dnn_logit_fn_builder(units,
+ hidden_units,
+ feature_columns,
+ activation_fn,
+ dropout,
+ input_layer_partitioner,
+ batch_norm,
+ shared_state_manager=None):
"""Function builder for a dnn logit_fn.
Args:
@@ -60,6 +69,8 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
coordinate.
input_layer_partitioner: Partitioner for input layer.
batch_norm: Whether to use batch normalization after each hidden layer.
+ shared_state_manager: A SharedEmbeddingStateManager object to hold the
+ shared state for SharedEmbeddingColumns.
Returns:
A logit_fn (see below).
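A hedged sketch of how the extended builder signature is exercised; the column name, layer sizes, and input values below are illustrative, and the private function is called here the same way the canned estimators call it:

import numpy as np

from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.ops import nn

# Illustrative v2 column; maybe_create_shared_state_manager returns a
# SharedEmbeddingStateManager for v2 columns and None otherwise.
columns = [feature_column_v2.numeric_column('age')]
shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(
    columns)
logit_fn = dnn._dnn_logit_fn_builder(  # pylint: disable=protected-access
    units=3,
    hidden_units=(10, 10),
    feature_columns=columns,
    activation_fn=nn.relu,
    dropout=None,
    input_layer_partitioner=None,
    batch_norm=False,
    shared_state_manager=shared_state_manager)
logits = logit_fn(
    features={'age': np.array([[25.0]], dtype=np.float32)},
    mode=model_fn.ModeKeys.TRAIN)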
@@ -85,50 +96,132 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
A `Tensor` representing the logits, or a list of `Tensor`'s representing
multiple logits in the MultiHead case.
"""
- is_training = mode == model_fn.ModeKeys.TRAIN
- with variable_scope.variable_scope(
- 'input_from_feature_columns',
- values=tuple(six.itervalues(features)),
- partitioner=input_layer_partitioner):
- net = feature_column_lib.input_layer(
- features=features, feature_columns=feature_columns)
+ dnn_model = _DNNModel(
+ units,
+ hidden_units,
+ feature_columns,
+ activation_fn,
+ dropout,
+ input_layer_partitioner,
+ batch_norm,
+ shared_state_manager,
+ name='dnn')
+ return dnn_model(features, mode)
+
+ return dnn_logit_fn
+
+
+def _get_previous_name_scope():
+ current_name_scope = ops.get_name_scope()
+ return current_name_scope.rsplit('/', 1)[0] + '/'
+
+
+class _DNNModel(training.Model):
+ """A DNN Model."""
+
+ def __init__(self,
+ units,
+ hidden_units,
+ feature_columns,
+ activation_fn,
+ dropout,
+ input_layer_partitioner,
+ batch_norm,
+ shared_state_manager,
+ name=None,
+ **kwargs):
+ super(_DNNModel, self).__init__(name=name, **kwargs)
+ self._is_v2 = False
+ if feature_column_v2.is_feature_column_v2(feature_columns):
+ self._is_v2 = True
+ self._input_layer = feature_column_v2.FeatureLayer(
+ feature_columns=feature_columns,
+ name='input_layer',
+ shared_state_manager=shared_state_manager)
+ else:
+ self._input_layer = feature_column.InputLayer(
+ feature_columns=feature_columns,
+ name='input_layer',
+ create_scope_now=False)
+
+ self._add_layer(self._input_layer, 'input_layer')
+
+ self._dropout = dropout
+ self._batch_norm = batch_norm
+
+ self._hidden_layers = []
+ self._dropout_layers = []
+ self._batch_norm_layers = []
+ self._hidden_layer_scope_names = []
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
- 'hiddenlayer_%d' % layer_id, values=(net,)) as hidden_layer_scope:
- net = core_layers.dense(
- net,
+ 'hiddenlayer_%d' % layer_id) as hidden_layer_scope:
+ hidden_layer = core_layers.Dense(
units=num_hidden_units,
activation=activation_fn,
kernel_initializer=init_ops.glorot_uniform_initializer(),
- name=hidden_layer_scope)
- if dropout is not None and is_training:
- net = core_layers.dropout(net, rate=dropout, training=True)
- if batch_norm:
- # TODO(hjm): In future, if this becomes popular, we can enable
- # customization of the batch normalization params by accepting a
- # list of `BatchNormalization` instances as `batch_norm`.
- net = normalization.batch_normalization(
- net,
+ name=hidden_layer_scope,
+ _scope=hidden_layer_scope)
+ self._add_layer(hidden_layer, hidden_layer_scope.name)
+ self._hidden_layer_scope_names.append(hidden_layer_scope.name)
+ self._hidden_layers.append(hidden_layer)
+ if self._dropout is not None:
+ dropout_layer = core_layers.Dropout(rate=self._dropout)
+ self._add_layer(dropout_layer, dropout_layer.name)
+ self._dropout_layers.append(dropout_layer)
+ if self._batch_norm:
+ batch_norm_layer = normalization.BatchNormalization(
# The default momentum 0.99 actually crashes on certain
# problems, so here we use 0.999, which is the default of
# tf.contrib.layers.batch_norm.
momentum=0.999,
- training=is_training,
- name='batchnorm_%d' % layer_id)
- _add_hidden_layer_summary(net, hidden_layer_scope.name)
-
- with variable_scope.variable_scope('logits', values=(net,)) as logits_scope:
- logits = core_layers.dense(
- net,
+ trainable=True,
+ name='batchnorm_%d' % layer_id,
+ _scope='batchnorm_%d' % layer_id)
+ self._add_layer(batch_norm_layer, batch_norm_layer.name)
+ self._batch_norm_layers.append(batch_norm_layer)
+
+ with variable_scope.variable_scope('logits') as logits_scope:
+ self._logits_layer = core_layers.Dense(
units=units,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer(),
- name=logits_scope)
- _add_hidden_layer_summary(logits, logits_scope.name)
-
- return logits
-
- return dnn_logit_fn
+ name=logits_scope,
+ _scope=logits_scope)
+ self._add_layer(self._logits_layer, logits_scope.name)
+ self._logits_scope_name = logits_scope.name
+ self._logits_layer._use_resource_variables = False # pylint: disable=protected-access
+ self._input_layer_partitioner = input_layer_partitioner
+
+ def call(self, features, mode):
+ is_training = mode == model_fn.ModeKeys.TRAIN
+ # The Keras training.Model adds a name_scope with the name of the model,
+ # which changes the constructed graph. Hence we re-enter the name_scope
+ # that was active before the training.Model scope was applied.
+ # TODO(rohanj): Remove this in TF 2.0 (b/116728605)
+ with ops.name_scope(name=_get_previous_name_scope()):
+ # TODO(rohanj): Remove dependence on variable scope for partitioning.
+ with variable_scope.variable_scope(
+ 'input_from_feature_columns',
+ partitioner=self._input_layer_partitioner):
+ net = self._input_layer(features)
+ for i in range(len(self._hidden_layers)):
+ net = self._hidden_layers[i](net)
+ if self._dropout is not None and is_training:
+ net = self._dropout_layers[i](net, training=True)
+ if self._batch_norm:
+ net = self._batch_norm_layers[i](net, training=is_training)
+ _add_hidden_layer_summary(net, self._hidden_layer_scope_names[i])
+
+ logits = self._logits_layer(net)
+ _add_hidden_layer_summary(logits, self._logits_scope_name)
+ return logits
+
+ def _add_layer(self, layer, layer_name):
+ # "Magic" required for keras.Model classes to track all the variables in
+ # a list of layers.Layer objects.
+ # TODO(ashankar): Figure out API so user code doesn't have to do this.
+ setattr(self, layer_name, layer)
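The setattr trick in _add_layer above is the load-bearing detail: a Keras Model only tracks the variables of layers reachable through its attributes. A minimal standalone sketch of the same mechanism, using only modules this file already imports (the class below is illustrative, not part of the commit):

from tensorflow.python.keras.engine import training
from tensorflow.python.layers import core as core_layers


class _TinySetattrModel(training.Model):
  """Illustrative only: mirrors the attribute tracking _add_layer relies on."""

  def __init__(self, hidden_units):
    super(_TinySetattrModel, self).__init__()
    self._hidden_layers = []
    for i, units in enumerate(hidden_units):
      layer = core_layers.Dense(units=units)
      # At the time, variables held only in a plain list attribute were not
      # reliably tracked, hence the explicit attribute assignment.
      setattr(self, 'hiddenlayer_%d' % i, layer)
      self._hidden_layers.append(layer)

  def call(self, inputs):
    net = inputs
    for layer in self._hidden_layers:
      net = layer(net)
    return net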
def _dnn_model_fn(features,
@@ -143,7 +236,8 @@ def _dnn_model_fn(features,
input_layer_partitioner=None,
config=None,
use_tpu=False,
- batch_norm=False):
+ batch_norm=False,
+ shared_state_manager=None):
"""Deep Neural Net model_fn.
Args:
@@ -167,6 +261,8 @@ def _dnn_model_fn(features,
use_tpu: Whether to make a DNN model able to run on TPU. Will make function
return a `_TPUEstimatorSpec` instance and disable variable partitioning.
batch_norm: Whether to use batch normalization after each hidden layer.
+ shared_state_manager: A SharedEmbeddingStateManager object to hold the
+ shared state for SharedEmbeddingColumns.
Returns:
An `EstimatorSpec` instance.
@@ -202,7 +298,8 @@ def _dnn_model_fn(features,
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
- batch_norm=batch_norm)
+ batch_norm=batch_norm,
+ shared_state_manager=shared_state_manager)
logits = logit_fn(features=features, mode=mode)
if use_tpu:
@@ -370,6 +467,10 @@ class DNNClassifier(estimator.Estimator):
"""
head = head_lib._binary_logistic_or_multi_class_head( # pylint: disable=protected-access
n_classes, weight_column, label_vocabulary, loss_reduction)
+
+ shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(
+ feature_columns)
+
def _model_fn(features, labels, mode, config):
"""Call the defined shared _dnn_model_fn."""
return _dnn_model_fn(
@@ -384,7 +485,8 @@ class DNNClassifier(estimator.Estimator):
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config,
- batch_norm=batch_norm)
+ batch_norm=batch_norm,
+ shared_state_manager=shared_state_manager)
super(DNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
@@ -532,6 +634,10 @@ class DNNRegressor(estimator.Estimator):
batch_norm: Whether to use batch normalization after each hidden layer.
"""
+ shared_state_manager = None
+ if feature_column_v2.is_feature_column_v2(feature_columns):
+ shared_state_manager = feature_column_v2.SharedEmbeddingStateManager()
+
def _model_fn(features, labels, mode, config):
"""Call the defined shared _dnn_model_fn."""
return _dnn_model_fn(
@@ -539,7 +645,8 @@ class DNNRegressor(estimator.Estimator):
labels=labels,
mode=mode,
head=head_lib._regression_head( # pylint: disable=protected-access
- label_dimension=label_dimension, weight_column=weight_column,
+ label_dimension=label_dimension,
+ weight_column=weight_column,
loss_reduction=loss_reduction),
hidden_units=hidden_units,
feature_columns=tuple(feature_columns or []),
@@ -548,7 +655,8 @@ class DNNRegressor(estimator.Estimator):
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config,
- batch_norm=batch_norm)
+ batch_norm=batch_norm,
+ shared_state_manager=shared_state_manager)
super(DNNRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
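DNNClassifier and DNNRegressor arrive at the same shared state manager by two routes: DNNClassifier calls the maybe_create_shared_state_manager helper, while DNNRegressor performs the is_feature_column_v2 check inline. A sketch of their equivalence (the wrapper name _make_shared_state_manager is made up for illustration):

from tensorflow.python.feature_column import feature_column_v2


def _make_shared_state_manager(feature_columns):
  # Inline form, as in DNNRegressor; equivalent to calling
  # feature_column_v2.maybe_create_shared_state_manager(feature_columns).
  if feature_column_v2.is_feature_column_v2(feature_columns):
    return feature_column_v2.SharedEmbeddingStateManager()
  return None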
diff --git a/tensorflow/python/estimator/canned/dnn_linear_combined.py b/tensorflow/python/estimator/canned/dnn_linear_combined.py
index 9799cf9e98..f712244c8d 100644
--- a/tensorflow/python/estimator/canned/dnn_linear_combined.py
+++ b/tensorflow/python/estimator/canned/dnn_linear_combined.py
@@ -27,6 +27,7 @@ from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import optimizers
+from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
@@ -142,6 +143,9 @@ def _dnn_linear_combined_model_fn(features,
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
+ shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(
+ list(linear_feature_columns) + list(dnn_feature_columns))
+
# Build DNN Logits.
dnn_parent_scope = 'dnn'
@@ -169,8 +173,9 @@ def _dnn_linear_combined_model_fn(features,
feature_columns=dnn_feature_columns,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
+ batch_norm=batch_norm,
input_layer_partitioner=input_layer_partitioner,
- batch_norm=batch_norm)
+ shared_state_manager=shared_state_manager)
dnn_logits = dnn_logit_fn(features=features, mode=mode)
linear_parent_scope = 'linear'
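The state manager is built from the concatenated linear and DNN column lists because a shared embedding can span both towers. A sketch under the assumption that feature_column_v2 mirrors the v1 constructors categorical_column_with_identity and shared_embedding_columns (column names are illustrative):

from tensorflow.python.feature_column import feature_column_v2

user = feature_column_v2.categorical_column_with_identity(
    'user', num_buckets=10)
item = feature_column_v2.categorical_column_with_identity(
    'item', num_buckets=10)
# One SharedEmbeddingColumn per input column, backed by a single table.
user_emb, item_emb = feature_column_v2.shared_embedding_columns(
    [user, item], dimension=4)
linear_feature_columns = [user_emb]
dnn_feature_columns = [item_emb]
shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(
    list(linear_feature_columns) + list(dnn_feature_columns))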
diff --git a/tensorflow/python/estimator/canned/dnn_test.py b/tensorflow/python/estimator/canned/dnn_test.py
index fc90b7c35e..756696cea0 100644
--- a/tensorflow/python/estimator/canned/dnn_test.py
+++ b/tensorflow/python/estimator/canned/dnn_test.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import shutil
import tempfile
+from absl.testing import parameterized
import numpy as np
import six
@@ -33,6 +34,7 @@ from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
+from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
@@ -62,15 +64,32 @@ class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
- dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
+ dnn_testing_utils.BaseDNNModelFnTest.__init__(
+ self, dnn._dnn_model_fn, fc_impl=feature_column)
+
+
+class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNModelFnTest.__init__(
+ self, dnn._dnn_model_fn, fc_impl=feature_column_v2)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
- dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
- dnn._dnn_logit_fn_builder)
+ dnn_testing_utils.BaseDNNLogitFnTest.__init__(
+ self, dnn._dnn_logit_fn_builder, fc_impl=feature_column)
+
+
+class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNLogitFnTest.__init__(
+ self, dnn._dnn_logit_fn_builder, fc_impl=feature_column_v2)
class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
@@ -78,8 +97,17 @@ class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
- dnn_testing_utils.BaseDNNWarmStartingTest.__init__(self, _dnn_classifier_fn,
- _dnn_regressor_fn)
+ dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
+ self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column)
+
+
+class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
+ test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
+ self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNClassifierEvaluateTest(
@@ -88,7 +116,16 @@ class DNNClassifierEvaluateTest(
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
- self, _dnn_classifier_fn)
+ self, _dnn_classifier_fn, fc_impl=feature_column)
+
+
+class DNNClassifierEvaluateV2Test(
+ dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
+ self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierPredictTest(
@@ -97,7 +134,16 @@ class DNNClassifierPredictTest(
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
- self, _dnn_classifier_fn)
+ self, _dnn_classifier_fn, fc_impl=feature_column)
+
+
+class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPredictTest,
+ test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
+ self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierTrainTest(
@@ -106,7 +152,16 @@ class DNNClassifierTrainTest(
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
- self, _dnn_classifier_fn)
+ self, _dnn_classifier_fn, fc_impl=feature_column)
+
+
+class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,
+ test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
+ self, _dnn_classifier_fn, fc_impl=feature_column_v2)
def _dnn_regressor_fn(*args, **kwargs):
@@ -119,7 +174,16 @@ class DNNRegressorEvaluateTest(
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
- self, _dnn_regressor_fn)
+ self, _dnn_regressor_fn, fc_impl=feature_column)
+
+
+class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvaluateTest,
+ test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
+ self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorPredictTest(
@@ -128,7 +192,16 @@ class DNNRegressorPredictTest(
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
- self, _dnn_regressor_fn)
+ self, _dnn_regressor_fn, fc_impl=feature_column)
+
+
+class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredictTest,
+ test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
+ self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorTrainTest(
@@ -137,7 +210,16 @@ class DNNRegressorTrainTest(
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
- self, _dnn_regressor_fn)
+ self, _dnn_regressor_fn, fc_impl=feature_column)
+
+
+class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
+ test.TestCase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ test.TestCase.__init__(self, methodName)
+ dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
+ self, _dnn_regressor_fn, fc_impl=feature_column_v2)
def _queue_parsed_features(feature_map):
@@ -156,7 +238,8 @@ def _queue_parsed_features(feature_map):
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
-class DNNRegressorIntegrationTest(test.TestCase):
+@parameterized.parameters((feature_column,), (feature_column_v2,))
+class DNNRegressorIntegrationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -166,11 +249,11 @@ class DNNRegressorIntegrationTest(test.TestCase):
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
- def _test_complete_flow(
- self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
- label_dimension, batch_size):
- feature_columns = [
- feature_column.numeric_column('x', shape=(input_dimension,))]
+ def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
+ input_dimension, label_dimension, batch_size,
+ fc_impl):
+ feature_columns = [fc_impl.numeric_column('x', shape=(input_dimension,))]
+
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
@@ -194,14 +277,14 @@ class DNNRegressorIntegrationTest(test.TestCase):
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
- feature_spec = feature_column.make_parse_example_spec(feature_columns)
+ feature_spec = fc_impl.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
- def test_numpy_input_fn(self):
+ def test_numpy_input_fn(self, fc_impl):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
@@ -230,9 +313,10 @@ class DNNRegressorIntegrationTest(test.TestCase):
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
- batch_size=batch_size)
+ batch_size=batch_size,
+ fc_impl=fc_impl)
- def test_pandas_input_fn(self):
+ def test_pandas_input_fn(self, fc_impl):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
@@ -263,9 +347,10 @@ class DNNRegressorIntegrationTest(test.TestCase):
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
- batch_size=batch_size)
+ batch_size=batch_size,
+ fc_impl=fc_impl)
- def test_input_fn_from_parse_example(self):
+ def test_input_fn_from_parse_example(self, fc_impl):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
@@ -313,9 +398,11 @@ class DNNRegressorIntegrationTest(test.TestCase):
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
- batch_size=batch_size)
+ batch_size=batch_size,
+ fc_impl=fc_impl)
-class DNNClassifierIntegrationTest(test.TestCase):
+@parameterized.parameters((feature_column,), (feature_column_v2,))
+class DNNClassifierIntegrationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
@@ -329,11 +416,10 @@ class DNNClassifierIntegrationTest(test.TestCase):
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
- def _test_complete_flow(
- self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
- n_classes, batch_size):
- feature_columns = [
- feature_column.numeric_column('x', shape=(input_dimension,))]
+ def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
+ input_dimension, n_classes, batch_size, fc_impl):
+ feature_columns = [fc_impl.numeric_column('x', shape=(input_dimension,))]
+
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
@@ -357,14 +443,14 @@ class DNNClassifierIntegrationTest(test.TestCase):
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
- feature_spec = feature_column.make_parse_example_spec(feature_columns)
+ feature_spec = fc_impl.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
- def test_numpy_input_fn(self):
+ def test_numpy_input_fn(self, fc_impl):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
@@ -396,9 +482,10 @@ class DNNClassifierIntegrationTest(test.TestCase):
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
- batch_size=batch_size)
+ batch_size=batch_size,
+ fc_impl=fc_impl)
- def test_pandas_input_fn(self):
+ def test_pandas_input_fn(self, fc_impl):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
@@ -430,9 +517,10 @@ class DNNClassifierIntegrationTest(test.TestCase):
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
- batch_size=batch_size)
+ batch_size=batch_size,
+ fc_impl=fc_impl)
- def test_input_fn_from_parse_example(self):
+ def test_input_fn_from_parse_example(self, fc_impl):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
@@ -484,7 +572,8 @@ class DNNClassifierIntegrationTest(test.TestCase):
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
- batch_size=batch_size)
+ batch_size=batch_size,
+ fc_impl=fc_impl)
if __name__ == '__main__':
  test.main()
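The dnn_testing_utils.py diff below threads an fc_impl module through every base test class so the same test body can build either v1 or v2 columns. A minimal sketch of that constructor-injection pattern (the class and method names are illustrative):

from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2


class _BaseColumnTest(object):
  """Illustrative only: column construction is routed through fc_impl."""

  def __init__(self, fc_impl=feature_column):
    self._fc_impl = fc_impl

  def make_columns(self):
    return [self._fc_impl.numeric_column('age')]


v1_columns = _BaseColumnTest(fc_impl=feature_column).make_columns()
v2_columns = _BaseColumnTest(fc_impl=feature_column_v2).make_columns()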
diff --git a/tensorflow/python/estimator/canned/dnn_testing_utils.py b/tensorflow/python/estimator/canned/dnn_testing_utils.py
index 11f1e93630..cd66d0a3bd 100644
--- a/tensorflow/python/estimator/canned/dnn_testing_utils.py
+++ b/tensorflow/python/estimator/canned/dnn_testing_utils.py
@@ -104,6 +104,7 @@ def create_checkpoint(weights_and_biases,
weights_and_biases: Iterable of tuples of weight and bias values.
global_step: Initial global step to save in checkpoint.
model_dir: Directory into which checkpoint is saved.
+ batch_norm_vars: Variables used for batch normalization.
"""
weights, biases = zip(*weights_and_biases)
if batch_norm_vars:
@@ -244,8 +245,9 @@ def mock_optimizer(testcase, hidden_units, expected_loss=None):
class BaseDNNModelFnTest(object):
"""Tests that _dnn_model_fn passes expected logits to mock head."""
- def __init__(self, dnn_model_fn):
+ def __init__(self, dnn_model_fn, fc_impl=feature_column):
self._dnn_model_fn = dnn_model_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -272,7 +274,7 @@ class BaseDNNModelFnTest(object):
head=head,
hidden_units=hidden_units,
feature_columns=[
- feature_column.numeric_column(
+ self._fc_impl.numeric_column(
'age', shape=np.array(inputs).shape[1:])
],
optimizer=mock_optimizer(self, hidden_units))
@@ -462,8 +464,8 @@ class BaseDNNModelFnTest(object):
head=head,
hidden_units=hidden_units,
feature_columns=[
- feature_column.numeric_column('age'),
- feature_column.numeric_column('height')
+ self._fc_impl.numeric_column('age'),
+ self._fc_impl.numeric_column('height')
],
optimizer=mock_optimizer(self, hidden_units))
with monitored_session.MonitoredTrainingSession(
@@ -499,7 +501,7 @@ class BaseDNNModelFnTest(object):
head=head,
hidden_units=hidden_units,
feature_columns=[
- feature_column.numeric_column(
+ self._fc_impl.numeric_column(
'age', shape=np.array(inputs).shape[1:])
],
optimizer=mock_optimizer(self, hidden_units))
@@ -508,8 +510,9 @@ class BaseDNNModelFnTest(object):
class BaseDNNLogitFnTest(object):
"""Tests correctness of logits calculated from _dnn_logit_fn_builder."""
- def __init__(self, dnn_logit_fn_builder):
+ def __init__(self, dnn_logit_fn_builder, fc_impl=feature_column):
self._dnn_logit_fn_builder = dnn_logit_fn_builder
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -541,7 +544,7 @@ class BaseDNNLogitFnTest(object):
units=logits_dimension,
hidden_units=hidden_units,
feature_columns=[
- feature_column.numeric_column(
+ self._fc_impl.numeric_column(
'age', shape=np.array(inputs).shape[1:])
],
activation_fn=nn.relu,
@@ -786,8 +789,8 @@ class BaseDNNLogitFnTest(object):
units=logits_dimension,
hidden_units=hidden_units,
feature_columns=[
- feature_column.numeric_column('age'),
- feature_column.numeric_column('height')
+ self._fc_impl.numeric_column('age'),
+ self._fc_impl.numeric_column('height')
],
activation_fn=nn.relu,
dropout=None,
@@ -806,9 +809,13 @@ class BaseDNNLogitFnTest(object):
class BaseDNNWarmStartingTest(object):
- def __init__(self, _dnn_classifier_fn, _dnn_regressor_fn):
+ def __init__(self,
+ _dnn_classifier_fn,
+ _dnn_regressor_fn,
+ fc_impl=feature_column):
self._dnn_classifier_fn = _dnn_classifier_fn
self._dnn_regressor_fn = _dnn_regressor_fn
+ self._fc_impl = fc_impl
def setUp(self):
# Create a directory to save our old checkpoint and vocabularies to.
@@ -843,8 +850,8 @@ class BaseDNNWarmStartingTest(object):
def test_classifier_basic_warm_starting(self):
"""Tests correctness of DNNClassifier default warm-start."""
- city = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_list(
+ city = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
@@ -875,8 +882,8 @@ class BaseDNNWarmStartingTest(object):
def test_regressor_basic_warm_starting(self):
"""Tests correctness of DNNRegressor default warm-start."""
- city = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_list(
+ city = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
@@ -905,8 +912,8 @@ class BaseDNNWarmStartingTest(object):
def test_warm_starting_selective_variables(self):
"""Tests selecting variables to warm-start."""
- city = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_list(
+ city = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
@@ -958,8 +965,8 @@ class BaseDNNWarmStartingTest(object):
vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_list))
- occupation = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_file(
+ occupation = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=vocab_file,
vocabulary_size=len(vocab_list)),
@@ -985,8 +992,8 @@ class BaseDNNWarmStartingTest(object):
'new_occupation_vocab')
with open(new_vocab_file, 'w') as f:
f.write('\n'.join(new_vocab_list))
- new_occupation = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_file(
+ new_occupation = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=new_vocab_file,
vocabulary_size=len(new_vocab_list)),
@@ -1051,8 +1058,8 @@ class BaseDNNWarmStartingTest(object):
def test_warm_starting_with_naming_change(self):
"""Tests warm-starting with a Tensor name remapping."""
- locality = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_list(
+ locality = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_list(
'locality', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
@@ -1068,8 +1075,8 @@ class BaseDNNWarmStartingTest(object):
# Create a second DNNClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
- city = feature_column.embedding_column(
- feature_column.categorical_column_with_vocabulary_list(
+ city = self._fc_impl.embedding_column(
+ self._fc_impl.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
warm_started_dnn_classifier = self._dnn_classifier_fn(
@@ -1101,8 +1108,9 @@ class BaseDNNWarmStartingTest(object):
class BaseDNNClassifierEvaluateTest(object):
- def __init__(self, dnn_classifier_fn):
+ def __init__(self, dnn_classifier_fn, fc_impl=feature_column):
self._dnn_classifier_fn = dnn_classifier_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -1121,7 +1129,7 @@ class BaseDNNClassifierEvaluateTest(object):
dnn_classifier = self._dnn_classifier_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age')],
+ feature_columns=[self._fc_impl.numeric_column('age')],
model_dir=self._model_dir)
def _input_fn():
# batch_size = 2, one false label, and one true.
@@ -1161,7 +1169,7 @@ class BaseDNNClassifierEvaluateTest(object):
dnn_classifier = self._dnn_classifier_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age', shape=[2])],
+ feature_columns=[self._fc_impl.numeric_column('age', shape=[2])],
n_classes=n_classes,
model_dir=self._model_dir)
def _input_fn():
@@ -1192,7 +1200,7 @@ class BaseDNNClassifierEvaluateTest(object):
dnn_classifier = self._dnn_classifier_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age')],
+ feature_columns=[self._fc_impl.numeric_column('age')],
model_dir=self._model_dir)
def _input_fn():
# batch_size = 2, one false label, and one true.
@@ -1218,7 +1226,7 @@ class BaseDNNClassifierEvaluateTest(object):
dnn_classifier = self._dnn_classifier_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age', shape=[2])],
+ feature_columns=[self._fc_impl.numeric_column('age', shape=[2])],
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
@@ -1238,8 +1246,9 @@ class BaseDNNClassifierEvaluateTest(object):
class BaseDNNRegressorEvaluateTest(object):
- def __init__(self, dnn_regressor_fn):
+ def __init__(self, dnn_regressor_fn, fc_impl=feature_column):
self._dnn_regressor_fn = dnn_regressor_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -1259,7 +1268,7 @@ class BaseDNNRegressorEvaluateTest(object):
dnn_regressor = self._dnn_regressor_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age')],
+ feature_columns=[self._fc_impl.numeric_column('age')],
model_dir=self._model_dir)
def _input_fn():
return {'age': [[10.]]}, [[1.]]
@@ -1289,7 +1298,7 @@ class BaseDNNRegressorEvaluateTest(object):
dnn_regressor = self._dnn_regressor_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age', shape=[2])],
+ feature_columns=[self._fc_impl.numeric_column('age', shape=[2])],
label_dimension=label_dimension,
model_dir=self._model_dir)
def _input_fn():
@@ -1320,7 +1329,7 @@ class BaseDNNRegressorEvaluateTest(object):
dnn_regressor = self._dnn_regressor_fn(
hidden_units=(2, 2),
- feature_columns=[feature_column.numeric_column('age', shape=[2])],
+ feature_columns=[self._fc_impl.numeric_column('age', shape=[2])],
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
@@ -1339,8 +1348,9 @@ class BaseDNNRegressorEvaluateTest(object):
class BaseDNNClassifierPredictTest(object):
- def __init__(self, dnn_classifier_fn):
+ def __init__(self, dnn_classifier_fn, fc_impl=feature_column):
self._dnn_classifier_fn = dnn_classifier_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -1361,7 +1371,7 @@ class BaseDNNClassifierPredictTest(object):
dnn_classifier = self._dnn_classifier_fn(
hidden_units=(2, 2),
label_vocabulary=label_vocabulary,
- feature_columns=(feature_column.numeric_column('x'),),
+ feature_columns=(self._fc_impl.numeric_column('x'),),
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
@@ -1405,7 +1415,7 @@ class BaseDNNClassifierPredictTest(object):
dnn_classifier = self._dnn_classifier_fn(
hidden_units=(2, 2),
- feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
+ feature_columns=(self._fc_impl.numeric_column('x', shape=(2,)),),
label_vocabulary=label_vocabulary,
n_classes=3,
model_dir=self._model_dir)
@@ -1453,8 +1463,9 @@ class BaseDNNClassifierPredictTest(object):
class BaseDNNRegressorPredictTest(object):
- def __init__(self, dnn_regressor_fn):
+ def __init__(self, dnn_regressor_fn, fc_impl=feature_column):
self._dnn_regressor_fn = dnn_regressor_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -1475,7 +1486,7 @@ class BaseDNNRegressorPredictTest(object):
dnn_regressor = self._dnn_regressor_fn(
hidden_units=(2, 2),
- feature_columns=(feature_column.numeric_column('x'),),
+ feature_columns=(self._fc_impl.numeric_column('x'),),
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
@@ -1497,7 +1508,7 @@ class BaseDNNRegressorPredictTest(object):
dnn_regressor = self._dnn_regressor_fn(
hidden_units=(2, 2),
- feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
+ feature_columns=(self._fc_impl.numeric_column('x', shape=(2,)),),
label_dimension=3,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
@@ -1594,8 +1605,9 @@ def _assert_simple_summary(testcase, expected_values, actual_summary):
class BaseDNNClassifierTrainTest(object):
- def __init__(self, dnn_classifier_fn):
+ def __init__(self, dnn_classifier_fn, fc_impl=feature_column):
self._dnn_classifier_fn = dnn_classifier_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -1609,7 +1621,7 @@ class BaseDNNClassifierTrainTest(object):
hidden_units = (2, 2)
dnn_classifier = self._dnn_classifier_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, then validate final checkpoint.
@@ -1625,7 +1637,7 @@ class BaseDNNClassifierTrainTest(object):
n_classes = 3
dnn_classifier = self._dnn_classifier_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
@@ -1643,7 +1655,7 @@ class BaseDNNClassifierTrainTest(object):
self, hidden_units=hidden_units)
dnn_classifier = self._dnn_classifier_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
optimizer=opt,
model_dir=self._model_dir)
self.assertEqual(0, opt.minimize.call_count)
@@ -1682,7 +1694,7 @@ class BaseDNNClassifierTrainTest(object):
self, hidden_units=hidden_units, expected_loss=expected_loss)
dnn_classifier = self._dnn_classifier_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
optimizer=opt,
model_dir=self._model_dir)
self.assertEqual(0, opt.minimize.call_count)
@@ -1728,7 +1740,7 @@ class BaseDNNClassifierTrainTest(object):
self, hidden_units=hidden_units, expected_loss=expected_loss)
dnn_classifier = self._dnn_classifier_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
optimizer=opt,
model_dir=self._model_dir)
self.assertEqual(0, opt.minimize.call_count)
@@ -1759,7 +1771,7 @@ class BaseDNNClassifierTrainTest(object):
dnn_classifier = self._dnn_classifier_fn(
n_classes=n_classes,
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
optimizer=opt,
model_dir=self._model_dir)
self.assertEqual(0, opt.minimize.call_count)
@@ -1793,8 +1805,9 @@ class BaseDNNClassifierTrainTest(object):
class BaseDNNRegressorTrainTest(object):
- def __init__(self, dnn_regressor_fn):
+ def __init__(self, dnn_regressor_fn, fc_impl=feature_column):
self._dnn_regressor_fn = dnn_regressor_fn
+ self._fc_impl = fc_impl
def setUp(self):
self._model_dir = tempfile.mkdtemp()
@@ -1808,7 +1821,7 @@ class BaseDNNRegressorTrainTest(object):
hidden_units = (2, 2)
dnn_regressor = self._dnn_regressor_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, then validate final checkpoint.
@@ -1824,7 +1837,7 @@ class BaseDNNRegressorTrainTest(object):
opt = mock_optimizer(self, hidden_units=hidden_units)
dnn_regressor = self._dnn_regressor_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
optimizer=opt,
model_dir=self._model_dir)
self.assertEqual(0, opt.minimize.call_count)
@@ -1864,7 +1877,7 @@ class BaseDNNRegressorTrainTest(object):
self, hidden_units=hidden_units, expected_loss=expected_loss)
dnn_regressor = self._dnn_regressor_fn(
hidden_units=hidden_units,
- feature_columns=(feature_column.numeric_column('age'),),
+ feature_columns=(self._fc_impl.numeric_column('age'),),
optimizer=opt,
model_dir=self._model_dir)
self.assertEqual(0, opt.minimize.call_count)
@@ -1917,7 +1930,8 @@ class BaseDNNRegressorTrainTest(object):
dnn_regressor = self._dnn_regressor_fn(
hidden_units=hidden_units,
feature_columns=[
- feature_column.numeric_column('age', shape=[input_dimension])],
+ self._fc_impl.numeric_column('age', shape=[input_dimension])
+ ],
label_dimension=label_dimension,
optimizer=opt,
model_dir=self._model_dir)