author     Rohan Jain <rohanj@google.com>                    2018-09-18 19:39:27 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-09-18 19:43:44 -0700
commit     9fe177881224571aff0c267593f747f5fd7a2967 (patch)
tree       9c5051a7336ac9832171ebfee8e610ba550d0f1e /tensorflow/python/estimator
parent     9ee75bb6e29007b8b5ea4a6d981996d8a4d88373 (diff)
Getting DNNModel to work with the new feature columns.
PiperOrigin-RevId: 213561495
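
In short: the hand-rolled layer construction in `_dnn_logit_fn_builder` is replaced by a `_DNNModel` class deriving from the internal Keras `training.Model`, which dispatches between the old `feature_column.InputLayer` and the new `feature_column_v2.FeatureLayer`. The sketch below illustrates the layer-tracking idiom the new class depends on (see `_add_layers` in the diff); it is a minimal reconstruction using the public `tf.keras` API, and the class and variable names are illustrative, not part of this change:

```python
import tensorflow as tf


class LayerStackSketch(tf.keras.Model):
  """Minimal sketch of the _DNNModel layer-tracking idiom."""

  def __init__(self, hidden_units, name=None):
    super(LayerStackSketch, self).__init__(name=name)
    self._hidden_layers = self._add_layers([
        tf.keras.layers.Dense(n, activation=tf.nn.relu,
                              name='hiddenlayer_%d' % i)
        for i, n in enumerate(hidden_units)
    ])

  def call(self, inputs):
    net = inputs
    for layer in self._hidden_layers:
      net = layer(net)
    return net

  def _add_layers(self, layers):
    # Mirrors _add_layers in the diff: at the time of this change,
    # keras.Model tracked only variables reachable through object
    # attributes, so each layer is also attached as an attribute.
    for layer in layers:
      setattr(self, layer.name, layer)
    return layers
```

Keeping the layers reachable as attributes is exactly what the commit's `_add_layers` helper does, per its own TODO about finding a cleaner API.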
Diffstat (limited to 'tensorflow/python/estimator')
-rw-r--r--  tensorflow/python/estimator/BUILD                          |   2
-rw-r--r--  tensorflow/python/estimator/canned/dnn.py                  | 181
-rw-r--r--  tensorflow/python/estimator/canned/dnn_linear_combined.py  |   7
-rw-r--r--  tensorflow/python/estimator/canned/dnn_test.py             | 146
-rw-r--r--  tensorflow/python/estimator/canned/dnn_testing_utils.py    | 227
5 files changed, 446 insertions, 117 deletions
```diff
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index bfcc019dd5..2026bf8c4f 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -251,6 +251,7 @@ py_library(
         ":prediction_keys",
         "//tensorflow:tensorflow_py_no_contrib",
         "//third_party/py/numpy",
+        "@absl_py//absl/testing:parameterized",
         "@six_archive//:six",
     ],
 )
@@ -273,6 +274,7 @@ py_test(
         ":pandas_io",
         ":prediction_keys",
         "//tensorflow:tensorflow_py_no_contrib",
+        "@absl_py//absl/testing:parameterized",
         "@six_archive//:six",
     ],
 )
diff --git a/tensorflow/python/estimator/canned/dnn.py b/tensorflow/python/estimator/canned/dnn.py
index 1c0c4581c0..344113a5d8 100644
--- a/tensorflow/python/estimator/canned/dnn.py
+++ b/tensorflow/python/estimator/canned/dnn.py
@@ -24,7 +24,9 @@ from tensorflow.python.estimator import estimator
 from tensorflow.python.estimator import model_fn
 from tensorflow.python.estimator.canned import head as head_lib
 from tensorflow.python.estimator.canned import optimizers
-from tensorflow.python.feature_column import feature_column as feature_column_lib
+from tensorflow.python.feature_column import feature_column
+from tensorflow.python.feature_column import feature_column_v2
+from tensorflow.python.keras.engine import training
 from tensorflow.python.layers import core as core_layers
 from tensorflow.python.layers import normalization
 from tensorflow.python.ops import init_ops
@@ -45,8 +47,14 @@ def _add_hidden_layer_summary(value, tag):
   summary.histogram('%s/activation' % tag, value)
 
 
-def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
-                          dropout, input_layer_partitioner, batch_norm):
+def _dnn_logit_fn_builder(units,
+                          hidden_units,
+                          feature_columns,
+                          activation_fn,
+                          dropout,
+                          input_layer_partitioner,
+                          batch_norm,
+                          shared_state_manager=None):
   """Function builder for a dnn logit_fn.
 
   Args:
@@ -60,6 +68,8 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
       coordinate.
     input_layer_partitioner: Partitioner for input layer.
     batch_norm: Whether to use batch normalization after each hidden layer.
+    shared_state_manager: A SharedEmbeddingStateManager object to hold the
+      shared state for SharedEmbeddingColumn's.
 
   Returns:
     A logit_fn (see below).
@@ -85,50 +95,110 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
     A `Tensor` representing the logits, or a list of `Tensor`'s representing
     multiple logits in the MultiHead case.
""" - is_training = mode == model_fn.ModeKeys.TRAIN - with variable_scope.variable_scope( - 'input_from_feature_columns', - values=tuple(six.itervalues(features)), - partitioner=input_layer_partitioner): - net = feature_column_lib.input_layer( - features=features, feature_columns=feature_columns) + dnn_model = _DNNModel( + units, + hidden_units, + feature_columns, + activation_fn, + dropout, + input_layer_partitioner, + batch_norm, + shared_state_manager, + name='dnn') + return dnn_model(features, mode) + + return dnn_logit_fn + + +class _DNNModel(training.Model): + """A DNN Model.""" + + def __init__(self, + units, + hidden_units, + feature_columns, + activation_fn, + dropout, + input_layer_partitioner, + batch_norm, + shared_state_manager, + name=None, + **kwargs): + super(_DNNModel, self).__init__(name=name, **kwargs) + + if feature_column_v2.is_feature_column_v2(feature_columns): + input_layer = feature_column_v2.FeatureLayer( + feature_columns=feature_columns, + name='input_layer', + shared_state_manager=shared_state_manager) + else: + with variable_scope.variable_scope('input_from_feature_columns'): + input_layer = feature_column.InputLayer( + feature_columns=feature_columns, name='input_layer') + + self._input_layer = self._add_layers([input_layer])[0] + + self._dropout = dropout + self._batch_norm = batch_norm + + hidden_layers = [] + dropout_layers = [] + batch_norm_layers = [] for layer_id, num_hidden_units in enumerate(hidden_units): - with variable_scope.variable_scope( - 'hiddenlayer_%d' % layer_id, values=(net,)) as hidden_layer_scope: - net = core_layers.dense( - net, - units=num_hidden_units, - activation=activation_fn, - kernel_initializer=init_ops.glorot_uniform_initializer(), - name=hidden_layer_scope) - if dropout is not None and is_training: - net = core_layers.dropout(net, rate=dropout, training=True) - if batch_norm: - # TODO(hjm): In future, if this becomes popular, we can enable - # customization of the batch normalization params by accepting a - # list of `BatchNormalization` instances as `batch_norm`. - net = normalization.batch_normalization( - net, - # The default momentum 0.99 actually crashes on certain - # problem, so here we use 0.999, which is the default of - # tf.contrib.layers.batch_norm. - momentum=0.999, - training=is_training, - name='batchnorm_%d' % layer_id) - _add_hidden_layer_summary(net, hidden_layer_scope.name) - - with variable_scope.variable_scope('logits', values=(net,)) as logits_scope: - logits = core_layers.dense( - net, - units=units, - activation=None, + hidden_layer = core_layers.Dense( + units=num_hidden_units, + activation=activation_fn, kernel_initializer=init_ops.glorot_uniform_initializer(), - name=logits_scope) - _add_hidden_layer_summary(logits, logits_scope.name) - + name='hiddenlayer_%d' % layer_id) + hidden_layers.append(hidden_layer) + if self._dropout is not None: + dropout_layer = core_layers.Dropout(rate=dropout) + dropout_layers.append(dropout_layer) + if self._batch_norm: + batch_norm_layer = normalization.BatchNormalization( + # The default momentum 0.99 actually crashes on certain + # problem, so here we use 0.999, which is the default of + # tf.contrib.layers.batch_norm. 
+            momentum=0.999,
+            trainable=True,
+            name='hiddenlayer_%d/batchnorm_%d' % (layer_id, layer_id))
+        batch_norm_layers.append(batch_norm_layer)
+
+    self._hidden_layers = self._add_layers(hidden_layers)
+    if self._dropout is not None:
+      self._dropout_layers = self._add_layers(dropout_layers)
+    if self._batch_norm:
+      self._batch_norm_layers = self._add_layers(batch_norm_layers)
+
+    self._logits_layer = core_layers.Dense(
+        units=units,
+        activation=None,
+        kernel_initializer=init_ops.glorot_uniform_initializer(),
+        name='logits')
+
+  def call(self, features, mode):
+    is_training = mode == model_fn.ModeKeys.TRAIN
+    with variable_scope.variable_scope('input_from_feature_columns'):
+      net = self._input_layer(features)
+    for i in range(len(self._hidden_layers)):
+      net = self._hidden_layers[i](net)
+      if self._dropout is not None and is_training:
+        net = self._dropout_layers[i](net)
+      if self._batch_norm:
+        net = self._batch_norm_layers[i](net, training=is_training)
+      _add_hidden_layer_summary(net, self._hidden_layers[i].name)
+
+    logits = self._logits_layer(net)
+    _add_hidden_layer_summary(logits, self._logits_layer.name)
     return logits
 
-  return dnn_logit_fn
+  def _add_layers(self, layers):
+    # "Magic" required for keras.Model classes to track all the variables in
+    # a list of layers.Layer objects.
+    # TODO(ashankar): Figure out API so user code doesn't have to do this.
+    for layer in layers:
+      setattr(self, layer.name, layer)
+    return layers
 
 
 def _dnn_model_fn(features,
@@ -143,7 +213,8 @@ def _dnn_model_fn(features,
                   input_layer_partitioner=None,
                   config=None,
                   use_tpu=False,
-                  batch_norm=False):
+                  batch_norm=False,
+                  shared_state_manager=None):
   """Deep Neural Net model_fn.
 
   Args:
@@ -167,6 +238,8 @@ def _dnn_model_fn(features,
     use_tpu: Whether to make a DNN model able to run on TPU. Will make function
       return a `_TPUEstimatorSpec` instance and disable variable partitioning.
     batch_norm: Whether to use batch normalization after each hidden layer.
+    shared_state_manager: A SharedEmbeddingStateManager object to hold the
+      shared state for SharedEmbeddingColumn's.
 
   Returns:
     An `EstimatorSpec` instance.
@@ -202,7 +275,8 @@ def _dnn_model_fn(features,
       activation_fn=activation_fn,
       dropout=dropout,
       input_layer_partitioner=input_layer_partitioner,
-      batch_norm=batch_norm)
+      batch_norm=batch_norm,
+      shared_state_manager=shared_state_manager)
   logits = logit_fn(features=features, mode=mode)
 
   if use_tpu:
@@ -370,6 +444,10 @@ class DNNClassifier(estimator.Estimator):
     """
     head = head_lib._binary_logistic_or_multi_class_head(  # pylint: disable=protected-access
         n_classes, weight_column, label_vocabulary, loss_reduction)
+
+    shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(
+        feature_columns)
+
     def _model_fn(features, labels, mode, config):
       """Call the defined shared _dnn_model_fn."""
       return _dnn_model_fn(
@@ -384,7 +462,8 @@ class DNNClassifier(estimator.Estimator):
           dropout=dropout,
           input_layer_partitioner=input_layer_partitioner,
           config=config,
-          batch_norm=batch_norm)
+          batch_norm=batch_norm,
+          shared_state_manager=shared_state_manager)
 
     super(DNNClassifier, self).__init__(
         model_fn=_model_fn, model_dir=model_dir, config=config,
@@ -532,6 +611,10 @@ class DNNRegressor(estimator.Estimator):
       batch_norm: Whether to use batch normalization after each hidden layer.
""" + shared_state_manager = None + if feature_column_v2.is_feature_column_v2(feature_columns): + shared_state_manager = feature_column_v2.SharedEmbeddingStateManager() + def _model_fn(features, labels, mode, config): """Call the defined shared _dnn_model_fn.""" return _dnn_model_fn( @@ -539,7 +622,8 @@ class DNNRegressor(estimator.Estimator): labels=labels, mode=mode, head=head_lib._regression_head( # pylint: disable=protected-access - label_dimension=label_dimension, weight_column=weight_column, + label_dimension=label_dimension, + weight_column=weight_column, loss_reduction=loss_reduction), hidden_units=hidden_units, feature_columns=tuple(feature_columns or []), @@ -548,7 +632,8 @@ class DNNRegressor(estimator.Estimator): dropout=dropout, input_layer_partitioner=input_layer_partitioner, config=config, - batch_norm=batch_norm) + batch_norm=batch_norm, + shared_state_manager=shared_state_manager) super(DNNRegressor, self).__init__( model_fn=_model_fn, model_dir=model_dir, config=config, diff --git a/tensorflow/python/estimator/canned/dnn_linear_combined.py b/tensorflow/python/estimator/canned/dnn_linear_combined.py index 9799cf9e98..f2ac9a7466 100644 --- a/tensorflow/python/estimator/canned/dnn_linear_combined.py +++ b/tensorflow/python/estimator/canned/dnn_linear_combined.py @@ -27,6 +27,7 @@ from tensorflow.python.estimator.canned import dnn from tensorflow.python.estimator.canned import head as head_lib from tensorflow.python.estimator.canned import linear from tensorflow.python.estimator.canned import optimizers +from tensorflow.python.feature_column import feature_column_v2 from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import nn @@ -142,6 +143,9 @@ def _dnn_linear_combined_model_fn(features, max_partitions=num_ps_replicas, min_slice_size=64 << 20)) + shared_state_manager = feature_column_v2.maybe_create_shared_state_manager( + list(linear_feature_columns) + list(dnn_feature_columns)) + # Build DNN Logits. 
   dnn_parent_scope = 'dnn'
@@ -170,7 +174,8 @@ def _dnn_linear_combined_model_fn(features,
         activation_fn=dnn_activation_fn,
         dropout=dnn_dropout,
         input_layer_partitioner=input_layer_partitioner,
-        batch_norm=batch_norm)
+        batch_norm=batch_norm,
+        shared_state_manager=shared_state_manager)
     dnn_logits = dnn_logit_fn(features=features, mode=mode)
 
   linear_parent_scope = 'linear'
diff --git a/tensorflow/python/estimator/canned/dnn_test.py b/tensorflow/python/estimator/canned/dnn_test.py
index fc90b7c35e..e64cd522b4 100644
--- a/tensorflow/python/estimator/canned/dnn_test.py
+++ b/tensorflow/python/estimator/canned/dnn_test.py
@@ -21,6 +21,7 @@ from __future__ import print_function
 import shutil
 import tempfile
 
+from absl.testing import parameterized
 import numpy as np
 import six
 
@@ -33,6 +34,7 @@ from tensorflow.python.estimator.export import export
 from tensorflow.python.estimator.inputs import numpy_io
 from tensorflow.python.estimator.inputs import pandas_io
 from tensorflow.python.feature_column import feature_column
+from tensorflow.python.feature_column import feature_column_v2
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import data_flow_ops
@@ -65,6 +67,14 @@ class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
     dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
 
 
+class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNModelFnTest.__init__(
+        self, dnn._dnn_model_fn, is_fc_v2=True)
+
+
 class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
 
   def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
@@ -73,6 +83,14 @@ class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
         dnn._dnn_logit_fn_builder)
 
 
+class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNLogitFnTest.__init__(
+        self, dnn._dnn_logit_fn_builder, is_fc_v2=True)
+
+
 class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
                           test.TestCase):
 
@@ -82,6 +100,15 @@ class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
                                                      _dnn_regressor_fn)
 
 
+class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
+                            test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
+        self, _dnn_classifier_fn, _dnn_regressor_fn, is_fc_v2=True)
+
+
 class DNNClassifierEvaluateTest(
     dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
 
@@ -91,6 +118,15 @@ class DNNClassifierEvaluateTest(
         self, _dnn_classifier_fn)
 
 
+class DNNClassifierEvaluateV2Test(
+    dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
+        self, _dnn_classifier_fn, is_fc_v2=True)
+
+
 class DNNClassifierPredictTest(
     dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
 
@@ -100,6 +136,15 @@ class DNNClassifierPredictTest(
         self, _dnn_classifier_fn)
 
 
+class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPredictTest,
+                                 test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
+        self, _dnn_classifier_fn, is_fc_v2=True)
+
+
 class DNNClassifierTrainTest(
     dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
 
@@ -109,6 +154,15 @@ class DNNClassifierTrainTest(
         self, _dnn_classifier_fn)
 
 
+class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,
+                               test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
+        self, _dnn_classifier_fn, is_fc_v2=True)
+
+
 def _dnn_regressor_fn(*args, **kwargs):
   return dnn.DNNRegressor(*args, **kwargs)
 
@@ -122,6 +176,15 @@ class DNNRegressorEvaluateTest(
         self, _dnn_regressor_fn)
 
 
+class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvaluateTest,
+                                 test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
+        self, _dnn_regressor_fn, is_fc_v2=True)
+
+
 class DNNRegressorPredictTest(
     dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
 
@@ -131,6 +194,15 @@ class DNNRegressorPredictTest(
         self, _dnn_regressor_fn)
 
 
+class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredictTest,
+                                test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
+        self, _dnn_regressor_fn, is_fc_v2=True)
+
+
 class DNNRegressorTrainTest(
     dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
 
@@ -140,6 +212,15 @@ class DNNRegressorTrainTest(
         self, _dnn_regressor_fn)
 
 
+class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
+                              test.TestCase):
+
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    test.TestCase.__init__(self, methodName)
+    dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
+        self, _dnn_regressor_fn, is_fc_v2=True)
+
+
 def _queue_parsed_features(feature_map):
   tensors_to_enqueue = []
   keys = []
@@ -156,7 +237,8 @@ def _queue_parsed_features(feature_map):
   return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
 
 
-class DNNRegressorIntegrationTest(test.TestCase):
+@parameterized.parameters((True,), (False,))
+class DNNRegressorIntegrationTest(test.TestCase, parameterized.TestCase):
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -166,11 +248,16 @@ class DNNRegressorIntegrationTest(test.TestCase):
       writer_cache.FileWriterCache.clear()
       shutil.rmtree(self._model_dir)
 
-  def _test_complete_flow(
-      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
-      label_dimension, batch_size):
+  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
+                          input_dimension, label_dimension, batch_size,
+                          is_fc_v2):
     feature_columns = [
         feature_column.numeric_column('x', shape=(input_dimension,))]
+    if is_fc_v2:
+      feature_columns = [
+          feature_column_v2.numeric_column('x', shape=(input_dimension,))
+      ]
+
     est = dnn.DNNRegressor(
         hidden_units=(2, 2),
         feature_columns=feature_columns,
@@ -194,14 +281,17 @@ class DNNRegressorIntegrationTest(test.TestCase):
     self.assertAllEqual((batch_size, label_dimension), predictions.shape)
 
     # EXPORT
-    feature_spec = feature_column.make_parse_example_spec(feature_columns)
+    if is_fc_v2:
+      feature_spec = feature_column_v2.make_parse_example_spec(feature_columns)
+    else:
+      feature_spec = feature_column.make_parse_example_spec(feature_columns)
     serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
         feature_spec)
     export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
     self.assertTrue(gfile.Exists(export_dir))
 
-  def test_numpy_input_fn(self):
+  def test_numpy_input_fn(self, is_fc_v2):
     """Tests complete flow with numpy_input_fn."""
     label_dimension = 2
     batch_size = 10
@@ -230,9 +320,10 @@ class DNNRegressorIntegrationTest(test.TestCase):
         predict_input_fn=predict_input_fn,
         input_dimension=label_dimension,
         label_dimension=label_dimension,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        is_fc_v2=is_fc_v2)
 
-  def test_pandas_input_fn(self):
+  def test_pandas_input_fn(self, is_fc_v2):
     """Tests complete flow with pandas_input_fn."""
     if not HAS_PANDAS:
       return
@@ -263,9 +354,10 @@ class DNNRegressorIntegrationTest(test.TestCase):
         predict_input_fn=predict_input_fn,
         input_dimension=label_dimension,
         label_dimension=label_dimension,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        is_fc_v2=is_fc_v2)
 
-  def test_input_fn_from_parse_example(self):
+  def test_input_fn_from_parse_example(self, is_fc_v2):
     """Tests complete flow with input_fn constructed from parse_example."""
     label_dimension = 2
     batch_size = 10
@@ -313,9 +405,11 @@ class DNNRegressorIntegrationTest(test.TestCase):
         predict_input_fn=_predict_input_fn,
         input_dimension=label_dimension,
         label_dimension=label_dimension,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        is_fc_v2=is_fc_v2)
 
 
+@parameterized.parameters((True,), (False,))
 class DNNClassifierIntegrationTest(test.TestCase):
 
   def setUp(self):
@@ -329,11 +423,15 @@ class DNNClassifierIntegrationTest(test.TestCase):
   def _as_label(self, data_in_float):
     return np.rint(data_in_float).astype(np.int64)
 
-  def _test_complete_flow(
-      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
-      n_classes, batch_size):
+  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
+                          input_dimension, n_classes, batch_size, is_fc_v2):
     feature_columns = [
         feature_column.numeric_column('x', shape=(input_dimension,))]
+    if is_fc_v2:
+      feature_columns = [
+          feature_column_v2.numeric_column('x', shape=(input_dimension,))
+      ]
+
     est = dnn.DNNClassifier(
         hidden_units=(2, 2),
         feature_columns=feature_columns,
@@ -357,14 +455,17 @@ class DNNClassifierIntegrationTest(test.TestCase):
     self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
 
     # EXPORT
-    feature_spec = feature_column.make_parse_example_spec(feature_columns)
+    if is_fc_v2:
+      feature_spec = feature_column_v2.make_parse_example_spec(feature_columns)
+    else:
+      feature_spec = feature_column.make_parse_example_spec(feature_columns)
     serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
         feature_spec)
     export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
     self.assertTrue(gfile.Exists(export_dir))
 
-  def test_numpy_input_fn(self):
+  def test_numpy_input_fn(self, is_fc_v2):
     """Tests complete flow with numpy_input_fn."""
     n_classes = 3
     input_dimension = 2
@@ -396,9 +497,10 @@ class DNNClassifierIntegrationTest(test.TestCase):
         predict_input_fn=predict_input_fn,
         input_dimension=input_dimension,
         n_classes=n_classes,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        is_fc_v2=is_fc_v2)
 
-  def test_pandas_input_fn(self):
+  def test_pandas_input_fn(self, is_fc_v2):
     """Tests complete flow with pandas_input_fn."""
     if not HAS_PANDAS:
       return
@@ -430,9 +532,10 @@ class DNNClassifierIntegrationTest(test.TestCase):
         predict_input_fn=predict_input_fn,
         input_dimension=input_dimension,
         n_classes=n_classes,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        is_fc_v2=is_fc_v2)
 
-  def test_input_fn_from_parse_example(self):
+  def test_input_fn_from_parse_example(self, is_fc_v2):
     """Tests complete flow with input_fn constructed from parse_example."""
     input_dimension = 2
     n_classes = 3
@@ -484,7 +587,8 @@ class DNNClassifierIntegrationTest(test.TestCase):
         predict_input_fn=_predict_input_fn,
         input_dimension=input_dimension,
         n_classes=n_classes,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        is_fc_v2=is_fc_v2)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/estimator/canned/dnn_testing_utils.py b/tensorflow/python/estimator/canned/dnn_testing_utils.py
index 11f1e93630..3b3b63cf65 100644
--- a/tensorflow/python/estimator/canned/dnn_testing_utils.py
+++ b/tensorflow/python/estimator/canned/dnn_testing_utils.py
@@ -34,6 +34,7 @@ from tensorflow.python.estimator.canned import metric_keys
 from tensorflow.python.estimator.canned import prediction_keys
 from tensorflow.python.estimator.inputs import numpy_io
 from tensorflow.python.feature_column import feature_column
+from tensorflow.python.feature_column import feature_column_v2
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
@@ -104,6 +105,7 @@ def create_checkpoint(weights_and_biases,
     weights_and_biases: Iterable of tuples of weight and bias values.
     global_step: Initial global step to save in checkpoint.
     model_dir: Directory into which checkpoint is saved.
+    batch_norm_vars: Variables used for batch normalization.
   """
   weights, biases = zip(*weights_and_biases)
   if batch_norm_vars:
@@ -244,8 +246,9 @@ def mock_optimizer(testcase, hidden_units, expected_loss=None):
 class BaseDNNModelFnTest(object):
   """Tests that _dnn_model_fn passes expected logits to mock head."""
 
-  def __init__(self, dnn_model_fn):
+  def __init__(self, dnn_model_fn, is_fc_v2=False):
     self._dnn_model_fn = dnn_model_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -260,6 +263,11 @@ class BaseDNNModelFnTest(object):
     """Tests that the expected logits are passed to mock head."""
     with ops.Graph().as_default():
       training_util.create_global_step()
+      age_column = feature_column.numeric_column(
+          'age', shape=np.array(inputs).shape[1:])
+      if self._is_fc_v2:
+        age_column = feature_column_v2.numeric_column(
+            'age', shape=np.array(inputs).shape[1:])
       head = mock_head(
           self,
           hidden_units=hidden_units,
@@ -271,10 +279,7 @@ class BaseDNNModelFnTest(object):
           mode=mode,
           head=head,
           hidden_units=hidden_units,
-          feature_columns=[
-              feature_column.numeric_column(
-                  'age', shape=np.array(inputs).shape[1:])
-          ],
+          feature_columns=[age_column],
           optimizer=mock_optimizer(self, hidden_units))
       with monitored_session.MonitoredTrainingSession(
           checkpoint_dir=self._model_dir) as sess:
@@ -441,6 +446,16 @@ class BaseDNNModelFnTest(object):
     inputs = ([[10.]], [[8.]])
     expected_logits = [[-0.48, 0.48, 0.39]]
 
+    feature_columns = [
+        feature_column.numeric_column('age'),
+        feature_column.numeric_column('height')
+    ]
+    if self._is_fc_v2:
+      feature_columns = [
+          feature_column_v2.numeric_column('age'),
+          feature_column_v2.numeric_column('height')
+      ]
+
     for mode in [
         model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
         model_fn.ModeKeys.PREDICT
@@ -461,10 +476,7 @@ class BaseDNNModelFnTest(object):
           mode=mode,
           head=head,
           hidden_units=hidden_units,
-          feature_columns=[
-              feature_column.numeric_column('age'),
-              feature_column.numeric_column('height')
-          ],
+          feature_columns=feature_columns,
           optimizer=mock_optimizer(self, hidden_units))
       with monitored_session.MonitoredTrainingSession(
           checkpoint_dir=self._model_dir) as sess:
@@ -508,8 +520,9 @@ class BaseDNNModelFnTest(object):
 class BaseDNNLogitFnTest(object):
   """Tests correctness of logits calculated from _dnn_logit_fn_builder."""
 
-  def __init__(self, dnn_logit_fn_builder):
+  def __init__(self, dnn_logit_fn_builder, is_fc_v2=False):
     self._dnn_logit_fn_builder = dnn_logit_fn_builder
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -533,6 +546,12 @@ class BaseDNNLogitFnTest(object):
       training_util.create_global_step()
       # Use a variable scope here with 'dnn', emulating the dnn model_fn, so
      # the checkpoint naming is shared.
+      age_column = feature_column.numeric_column(
+          'age', shape=np.array(inputs).shape[1:])
+      if self._is_fc_v2:
+        age_column = feature_column_v2.numeric_column(
+            'age', shape=np.array(inputs).shape[1:])
+
       with variable_scope.variable_scope('dnn'):
         input_layer_partitioner = (
             partitioned_variables.min_max_variable_partitioner(
@@ -540,10 +559,7 @@ class BaseDNNLogitFnTest(object):
         logit_fn = self._dnn_logit_fn_builder(
             units=logits_dimension,
             hidden_units=hidden_units,
-            feature_columns=[
-                feature_column.numeric_column(
-                    'age', shape=np.array(inputs).shape[1:])
-            ],
+            feature_columns=[age_column],
             activation_fn=nn.relu,
             dropout=None,
             input_layer_partitioner=input_layer_partitioner,
@@ -768,6 +784,16 @@ class BaseDNNLogitFnTest(object):
     inputs = ([[10.]], [[8.]])
     expected_logits = [[-0.48, 0.48, 0.39]]
 
+    feature_columns = [
+        feature_column.numeric_column('age'),
+        feature_column.numeric_column('height')
+    ]
+    if self._is_fc_v2:
+      feature_columns = [
+          feature_column_v2.numeric_column('age'),
+          feature_column_v2.numeric_column('height')
+      ]
+
     for mode in [
         model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
         model_fn.ModeKeys.PREDICT
@@ -785,10 +811,7 @@ class BaseDNNLogitFnTest(object):
         logit_fn = self._dnn_logit_fn_builder(
             units=logits_dimension,
             hidden_units=hidden_units,
-            feature_columns=[
-                feature_column.numeric_column('age'),
-                feature_column.numeric_column('height')
-            ],
+            feature_columns=feature_columns,
             activation_fn=nn.relu,
             dropout=None,
             input_layer_partitioner=input_layer_partitioner,
@@ -806,9 +829,10 @@ class BaseDNNLogitFnTest(object):
 class BaseDNNWarmStartingTest(object):
 
-  def __init__(self, _dnn_classifier_fn, _dnn_regressor_fn):
+  def __init__(self, _dnn_classifier_fn, _dnn_regressor_fn, is_fc_v2=False):
     self._dnn_classifier_fn = _dnn_classifier_fn
     self._dnn_regressor_fn = _dnn_regressor_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     # Create a directory to save our old checkpoint and vocabularies to.
@@ -847,6 +871,11 @@ class BaseDNNWarmStartingTest(object):
         feature_column.categorical_column_with_vocabulary_list(
             'city', vocabulary_list=['Mountain View', 'Palo Alto']),
         dimension=5)
+    if self._is_fc_v2:
+      city = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_list(
+              'city', vocabulary_list=['Mountain View', 'Palo Alto']),
+          dimension=5)
 
     # Create a DNNClassifier and train to save a checkpoint.
     dnn_classifier = self._dnn_classifier_fn(
@@ -879,6 +908,11 @@ class BaseDNNWarmStartingTest(object):
         feature_column.categorical_column_with_vocabulary_list(
             'city', vocabulary_list=['Mountain View', 'Palo Alto']),
         dimension=5)
+    if self._is_fc_v2:
+      city = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_list(
+              'city', vocabulary_list=['Mountain View', 'Palo Alto']),
+          dimension=5)
 
     # Create a DNNRegressor and train to save a checkpoint.
     dnn_regressor = self._dnn_regressor_fn(
@@ -909,6 +943,11 @@ class BaseDNNWarmStartingTest(object):
         feature_column.categorical_column_with_vocabulary_list(
             'city', vocabulary_list=['Mountain View', 'Palo Alto']),
         dimension=5)
+    if self._is_fc_v2:
+      city = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_list(
+              'city', vocabulary_list=['Mountain View', 'Palo Alto']),
+          dimension=5)
 
     # Create a DNNClassifier and train to save a checkpoint.
     dnn_classifier = self._dnn_classifier_fn(
@@ -964,6 +1003,13 @@ class BaseDNNWarmStartingTest(object):
             vocabulary_file=vocab_file,
             vocabulary_size=len(vocab_list)),
         dimension=2)
+    if self._is_fc_v2:
+      occupation = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_file(
+              'occupation',
+              vocabulary_file=vocab_file,
+              vocabulary_size=len(vocab_list)),
+          dimension=2)
 
     # Create a DNNClassifier and train to save a checkpoint.
     partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
@@ -991,6 +1037,13 @@ class BaseDNNWarmStartingTest(object):
             vocabulary_file=new_vocab_file,
             vocabulary_size=len(new_vocab_list)),
         dimension=2)
+    if self._is_fc_v2:
+      new_occupation = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_file(
+              'occupation',
+              vocabulary_file=new_vocab_file,
+              vocabulary_size=len(new_vocab_list)),
+          dimension=2)
     # We can create our VocabInfo object from the new and old occupation
     # FeatureColumn's.
     occupation_vocab_info = estimator.VocabInfo(
@@ -1055,6 +1108,11 @@ class BaseDNNWarmStartingTest(object):
         feature_column.categorical_column_with_vocabulary_list(
             'locality', vocabulary_list=['Mountain View', 'Palo Alto']),
         dimension=5)
+    if self._is_fc_v2:
+      locality = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_list(
+              'locality', vocabulary_list=['Mountain View', 'Palo Alto']),
+          dimension=5)
 
     # Create a DNNClassifier and train to save a checkpoint.
     dnn_classifier = self._dnn_classifier_fn(
@@ -1072,6 +1130,11 @@ class BaseDNNWarmStartingTest(object):
         feature_column.categorical_column_with_vocabulary_list(
             'city', vocabulary_list=['Mountain View', 'Palo Alto']),
         dimension=5)
+    if self._is_fc_v2:
+      city = feature_column_v2.embedding_column(
+          feature_column_v2.categorical_column_with_vocabulary_list(
+              'city', vocabulary_list=['Mountain View', 'Palo Alto']),
+          dimension=5)
     warm_started_dnn_classifier = self._dnn_classifier_fn(
         hidden_units=[256, 128],
         feature_columns=[city],
@@ -1101,8 +1164,9 @@ class BaseDNNWarmStartingTest(object):
 class BaseDNNClassifierEvaluateTest(object):
 
-  def __init__(self, dnn_classifier_fn):
+  def __init__(self, dnn_classifier_fn, is_fc_v2=False):
     self._dnn_classifier_fn = dnn_classifier_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -1119,9 +1183,12 @@ class BaseDNNClassifierEvaluateTest(object):
         (([[.6, .5]], [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
          ([[-1.], [1.]], [.3]),),
         global_step, self._model_dir)
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age')],
+        feature_columns=[age_column],
         model_dir=self._model_dir)
     def _input_fn():
       # batch_size = 2, one false label, and one true.
@@ -1159,9 +1226,12 @@ class BaseDNNClassifierEvaluateTest(object):
          .0]),),
         global_step, self._model_dir)
 
     n_classes = 3
+    age_column = feature_column.numeric_column('age', shape=[2])
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age', shape=[2])
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age', shape=[2])],
+        feature_columns=[age_column],
         n_classes=n_classes,
         model_dir=self._model_dir)
     def _input_fn():
@@ -1190,9 +1260,12 @@ class BaseDNNClassifierEvaluateTest(object):
         (([[.6, .5]], [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
          ([[-1.], [1.]], [.3]),),
         global_step, self._model_dir)
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age')],
+        feature_columns=[age_column],
         model_dir=self._model_dir)
     def _input_fn():
       # batch_size = 2, one false label, and one true.
@@ -1216,9 +1289,12 @@ class BaseDNNClassifierEvaluateTest(object):
         global_step, self._model_dir)
 
     n_classes = 3
+    age_column = feature_column.numeric_column('age', shape=[2])
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age', shape=[2])
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age', shape=[2])],
+        feature_columns=[age_column],
         n_classes=n_classes,
         weight_column='w',
         model_dir=self._model_dir)
@@ -1238,8 +1314,9 @@ class BaseDNNClassifierEvaluateTest(object):
 class BaseDNNRegressorEvaluateTest(object):
 
-  def __init__(self, dnn_regressor_fn):
+  def __init__(self, dnn_regressor_fn, is_fc_v2=False):
     self._dnn_regressor_fn = dnn_regressor_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -1257,9 +1334,12 @@ class BaseDNNRegressorEvaluateTest(object):
         (([[.6, .5]], [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
          ([[-1.], [1.]], [.3]),),
         global_step, self._model_dir)
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age')],
+        feature_columns=[age_column],
         model_dir=self._model_dir)
     def _input_fn():
       return {'age': [[10.]]}, [[1.]]
@@ -1287,9 +1367,12 @@ class BaseDNNRegressorEvaluateTest(object):
          .0]),),
         global_step, self._model_dir)
 
     label_dimension = 3
+    age_column = feature_column.numeric_column('age', shape=[2])
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age', shape=[2])
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age', shape=[2])],
+        feature_columns=[age_column],
         label_dimension=label_dimension,
         model_dir=self._model_dir)
     def _input_fn():
@@ -1318,9 +1401,12 @@ class BaseDNNRegressorEvaluateTest(object):
         global_step, self._model_dir)
 
     label_dimension = 3
+    age_column = feature_column.numeric_column('age', shape=[2])
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age', shape=[2])
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=(2, 2),
-        feature_columns=[feature_column.numeric_column('age', shape=[2])],
+        feature_columns=[age_column],
         label_dimension=label_dimension,
         weight_column='w',
         model_dir=self._model_dir)
@@ -1339,8 +1425,9 @@ class BaseDNNRegressorEvaluateTest(object):
 class BaseDNNClassifierPredictTest(object):
 
-  def __init__(self, dnn_classifier_fn):
+  def __init__(self, dnn_classifier_fn, is_fc_v2=False):
     self._dnn_classifier_fn = dnn_classifier_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -1358,10 +1445,13 @@ class BaseDNNClassifierPredictTest(object):
         global_step=0,
         model_dir=self._model_dir)
 
+    x_column = feature_column.numeric_column('x')
+    if self._is_fc_v2:
+      x_column = feature_column_v2.numeric_column('x')
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=(2, 2),
         label_vocabulary=label_vocabulary,
-        feature_columns=(feature_column.numeric_column('x'),),
+        feature_columns=(x_column,),
         model_dir=self._model_dir)
     input_fn = numpy_io.numpy_input_fn(
         x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
@@ -1403,9 +1493,12 @@ class BaseDNNClassifierPredictTest(object):
         global_step=0,
         model_dir=self._model_dir)
 
+    x_column = feature_column.numeric_column('x', shape=(2,))
+    if self._is_fc_v2:
+      x_column = feature_column_v2.numeric_column('x', shape=(2,))
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=(2, 2),
-        feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
+        feature_columns=(x_column,),
         label_vocabulary=label_vocabulary,
         n_classes=3,
         model_dir=self._model_dir)
@@ -1453,8 +1546,9 @@ class BaseDNNClassifierPredictTest(object):
 class BaseDNNRegressorPredictTest(object):
 
-  def __init__(self, dnn_regressor_fn):
+  def __init__(self, dnn_regressor_fn, is_fc_v2=False):
     self._dnn_regressor_fn = dnn_regressor_fn
+    self._is_fc_v2 = is_fc_v2
 
  def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -1473,9 +1567,12 @@ class BaseDNNRegressorPredictTest(object):
         global_step=0,
         model_dir=self._model_dir)
 
+    x_column = feature_column.numeric_column('x')
+    if self._is_fc_v2:
+      x_column = feature_column_v2.numeric_column('x')
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=(2, 2),
-        feature_columns=(feature_column.numeric_column('x'),),
+        feature_columns=(x_column,),
         model_dir=self._model_dir)
     input_fn = numpy_io.numpy_input_fn(
         x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
@@ -1495,9 +1592,12 @@ class BaseDNNRegressorPredictTest(object):
          [.3, -.3, .0]),),
         100, self._model_dir)
 
+    x_column = feature_column.numeric_column('x', shape=(2,))
+    if self._is_fc_v2:
+      x_column = feature_column_v2.numeric_column('x', shape=(2,))
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=(2, 2),
-        feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
+        feature_columns=(x_column,),
         label_dimension=3,
         model_dir=self._model_dir)
     input_fn = numpy_io.numpy_input_fn(
@@ -1594,8 +1694,9 @@ def _assert_simple_summary(testcase, expected_values, actual_summary):
 class BaseDNNClassifierTrainTest(object):
 
-  def __init__(self, dnn_classifier_fn):
+  def __init__(self, dnn_classifier_fn, is_fc_v2=False):
     self._dnn_classifier_fn = dnn_classifier_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -1606,10 +1707,13 @@ class BaseDNNClassifierTrainTest(object):
       shutil.rmtree(self._model_dir)
 
   def test_from_scratch_with_default_optimizer_binary(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     hidden_units = (2, 2)
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         model_dir=self._model_dir)
 
     # Train for a few steps, then validate final checkpoint.
@@ -1621,11 +1725,14 @@ class BaseDNNClassifierTrainTest(object):
         output_units=1, model_dir=self._model_dir)
 
   def test_from_scratch_with_default_optimizer_multi_class(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     hidden_units = (2, 2)
     n_classes = 3
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         n_classes=n_classes,
         model_dir=self._model_dir)
 
@@ -1638,12 +1745,15 @@ class BaseDNNClassifierTrainTest(object):
         output_units=n_classes, model_dir=self._model_dir)
 
   def test_from_scratch_validate_summary(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     hidden_units = (2, 2)
     opt = mock_optimizer(
         self, hidden_units=hidden_units)
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         optimizer=opt,
         model_dir=self._model_dir)
     self.assertEqual(0, opt.minimize.call_count)
@@ -1667,6 +1777,9 @@ class BaseDNNClassifierTrainTest(object):
       self.assertIn(metric_keys.MetricKeys.LOSS_MEAN, summary_keys)
 
   def test_binary_classification(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     base_global_step = 100
     hidden_units = (2, 2)
     create_checkpoint(
@@ -1682,7 +1795,7 @@ class BaseDNNClassifierTrainTest(object):
         self, hidden_units=hidden_units, expected_loss=expected_loss)
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         optimizer=opt,
         model_dir=self._model_dir)
     self.assertEqual(0, opt.minimize.call_count)
@@ -1713,6 +1826,9 @@ class BaseDNNClassifierTrainTest(object):
         hidden_units=hidden_units, output_units=1, model_dir=self._model_dir)
 
   def test_binary_classification_float_labels(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     base_global_step = 100
     hidden_units = (2, 2)
     create_checkpoint(
@@ -1728,7 +1844,7 @@ class BaseDNNClassifierTrainTest(object):
         self, hidden_units=hidden_units, expected_loss=expected_loss)
     dnn_classifier = self._dnn_classifier_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         optimizer=opt,
         model_dir=self._model_dir)
     self.assertEqual(0, opt.minimize.call_count)
@@ -1741,6 +1857,9 @@ class BaseDNNClassifierTrainTest(object):
     self.assertEqual(1, opt.minimize.call_count)
 
   def test_multi_class(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     n_classes = 3
     base_global_step = 100
     hidden_units = (2, 2)
@@ -1759,7 +1878,7 @@ class BaseDNNClassifierTrainTest(object):
     dnn_classifier = self._dnn_classifier_fn(
         n_classes=n_classes,
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         optimizer=opt,
         model_dir=self._model_dir)
     self.assertEqual(0, opt.minimize.call_count)
@@ -1793,8 +1912,9 @@ class BaseDNNClassifierTrainTest(object):
 class BaseDNNRegressorTrainTest(object):
 
-  def __init__(self, dnn_regressor_fn):
+  def __init__(self, dnn_regressor_fn, is_fc_v2=False):
     self._dnn_regressor_fn = dnn_regressor_fn
+    self._is_fc_v2 = is_fc_v2
 
   def setUp(self):
     self._model_dir = tempfile.mkdtemp()
@@ -1805,10 +1925,13 @@ class BaseDNNRegressorTrainTest(object):
       shutil.rmtree(self._model_dir)
 
   def test_from_scratch_with_default_optimizer(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     hidden_units = (2, 2)
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         model_dir=self._model_dir)
 
     # Train for a few steps, then validate final checkpoint.
@@ -1820,11 +1943,14 @@ class BaseDNNRegressorTrainTest(object):
         output_units=1, model_dir=self._model_dir)
 
   def test_from_scratch(self):
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     hidden_units = (2, 2)
     opt = mock_optimizer(self, hidden_units=hidden_units)
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         optimizer=opt,
         model_dir=self._model_dir)
     self.assertEqual(0, opt.minimize.call_count)
@@ -1849,6 +1975,9 @@ class BaseDNNRegressorTrainTest(object):
 
   def test_one_dim(self):
     """Asserts train loss for one-dimensional input and logits."""
+    age_column = feature_column.numeric_column('age')
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column('age')
     base_global_step = 100
     hidden_units = (2, 2)
     create_checkpoint(
@@ -1864,7 +1993,7 @@ class BaseDNNRegressorTrainTest(object):
         self, hidden_units=hidden_units, expected_loss=expected_loss)
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=hidden_units,
-        feature_columns=(feature_column.numeric_column('age'),),
+        feature_columns=(age_column,),
         optimizer=opt,
         model_dir=self._model_dir)
     self.assertEqual(0, opt.minimize.call_count)
@@ -1911,13 +2040,17 @@ class BaseDNNRegressorTrainTest(object):
     # See that test for calculation of logits.
     # logits = [[-0.48, 0.48, 0.39]]
     # loss = (1+0.48)^2 + (-1-0.48)^2 + (0.5-0.39)^2 = 4.3929
+    age_column = feature_column.numeric_column('age', shape=[input_dimension])
+    if self._is_fc_v2:
+      age_column = feature_column_v2.numeric_column(
+          'age', shape=[input_dimension])
+
     expected_loss = 4.3929
     opt = mock_optimizer(
         self, hidden_units=hidden_units, expected_loss=expected_loss)
     dnn_regressor = self._dnn_regressor_fn(
         hidden_units=hidden_units,
-        feature_columns=[
-            feature_column.numeric_column('age', shape=[input_dimension])],
+        feature_columns=[age_column],
         label_dimension=label_dimension,
         optimizer=opt,
         model_dir=self._model_dir)
```
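
As a usage note: the V2 test variants above differ from the V1 ones only in which module builds the columns. A hedged sketch of that toggle; `make_columns` is a hypothetical helper, but `numeric_column` exists with the same signature in both `feature_column` and `feature_column_v2`, as the diff shows:

```python
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2


def make_columns(is_fc_v2, input_dimension):
  """Hypothetical helper mirroring the tests' is_fc_v2 branches."""
  fc = feature_column_v2 if is_fc_v2 else feature_column
  return [fc.numeric_column('x', shape=(input_dimension,))]


# Example: columns = make_columns(is_fc_v2=True, input_dimension=2)
```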