author    Mustafa Ispir <ispir@google.com>    2016-12-08 13:08:09 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2016-12-08 13:25:26 -0800
commit    5b4c479a05782de156d4c24e579caec63444001a (patch)
tree      2700b3d4fc27dd658b0f697b895113eb6811dc3c
parent    3d34a116239b88853e68e85bd31cac7ef0b13d57 (diff)
Added config to the wide-n-deep model-fns.
Change: 141474204
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dnn.py                  17
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py  15
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/linear.py               13
3 files changed, 9 insertions(+), 36 deletions(-)
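
The same change repeats across all three files below: each private model_fn gains an optional `config` argument, and `num_ps_replicas` is read from the passed `RunConfig` rather than being threaded through the `params` dict. A minimal, self-contained sketch of the before/after pattern (`StubConfig` is a hypothetical stand-in for `RunConfig`, not part of tf.contrib.learn):

```python
# Sketch of the pattern this commit applies; StubConfig is a
# hypothetical stand-in for tf.contrib.learn's RunConfig.

def model_fn_before(features, labels, mode, params):
    # Old style: the caller had to copy num_ps_replicas into params.
    return params.get("num_ps_replicas", 0)

def model_fn_after(features, labels, mode, params, config=None):
    # New style: read it off the config; the default keeps old callers working.
    return config.num_ps_replicas if config else 0

class StubConfig(object):
    num_ps_replicas = 3

assert model_fn_before(None, None, None, {"num_ps_replicas": 3}) == 3
assert model_fn_after(None, None, None, {}, config=StubConfig()) == 3
assert model_fn_after(None, None, None, {}) == 0  # no config -> no partitioning
```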
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn.py b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
index 6f00c21673..fa73b64cc6 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
@@ -41,7 +41,6 @@ from tensorflow.python import summary
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
-from tensorflow.python.platform import tf_logging as logging
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"
@@ -69,7 +68,7 @@ def _add_hidden_layer_summary(value, tag):
summary.histogram("%s_activation" % tag, value)
-def _dnn_model_fn(features, labels, mode, params):
+def _dnn_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net model_fn.
Args:
@@ -93,10 +92,10 @@ def _dnn_model_fn(features, labels, mode, params):
coordinate.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
- * num_ps_replicas: The number of parameter server replicas.
* embedding_lr_multipliers: Optional. A dictionary from
`EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
multiply with learning rate for the embedding variables.
+ config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
@@ -110,7 +109,7 @@ def _dnn_model_fn(features, labels, mode, params):
activation_fn = params.get("activation_fn")
dropout = params.get("dropout")
gradient_clip_norm = params.get("gradient_clip_norm")
- num_ps_replicas = params.get("num_ps_replicas", 0)
+ num_ps_replicas = config.num_ps_replicas if config else 0
embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
features = _get_feature_dict(features)
@@ -298,10 +297,6 @@ class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
self._hidden_units = hidden_units
self._feature_columns = feature_columns
self._enable_centered_bias = enable_centered_bias
- if config is None:
- config = estimator.BaseEstimator._Config() # pylint: disable=protected-access
- logging.info("Using default config.")
-
self._estimator = estimator.Estimator(
model_fn=_dnn_model_fn,
model_dir=model_dir,
@@ -318,7 +313,6 @@ class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
- "num_ps_replicas": config.num_ps_replicas if config else 0,
"embedding_lr_multipliers": embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
@@ -615,10 +609,6 @@ class DNNRegressor(evaluable.Evaluable, trainable.Trainable):
A `DNNRegressor` estimator.
"""
self._feature_columns = feature_columns
- if config is None:
- config = estimator.BaseEstimator._Config() # pylint: disable=protected-access
- logging.info("Using default config.")
-
self._estimator = estimator.Estimator(
model_fn=_dnn_model_fn,
model_dir=model_dir,
@@ -634,7 +624,6 @@ class DNNRegressor(evaluable.Evaluable, trainable.Trainable):
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
- "num_ps_replicas": config.num_ps_replicas if config else 0,
"embedding_lr_multipliers": embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
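
The `config=None` default matters because the wrapping `Estimator` inspects the model_fn's signature and forwards `config` only when the function declares it, so model_fns without the argument keep working. An illustrative sketch of that dispatch, assuming signature-based forwarding on the estimator side (this paraphrases the mechanism; it is not the library's actual code):

```python
import inspect

def call_model_fn(model_fn, features, labels, mode, params, config):
    """Forward config only to model_fns whose signature accepts it."""
    arg_names = tuple(inspect.signature(model_fn).parameters)
    if "config" in arg_names:
        return model_fn(features, labels, mode, params, config=config)
    return model_fn(features, labels, mode, params)
```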
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
index 77bba26d90..0974683ad3 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
@@ -407,7 +407,7 @@ def _extract_embedding_lr_multipliers(embedding_lr_multipliers, collection_key,
return gradient_multipliers
-def _dnn_linear_combined_model_fn(features, labels, mode, params):
+def _dnn_linear_combined_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net and Linear combined model_fn.
Args:
@@ -441,6 +441,7 @@ def _dnn_linear_combined_model_fn(features, labels, mode, params):
* embedding_lr_multipliers: Optional. A dictionary from
`EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
multiply with learning rate for the embedding variables.
+ config: `RunConfig` object to configure the runtime settings.
Returns:
`ModelFnOps`
@@ -459,7 +460,7 @@ def _dnn_linear_combined_model_fn(features, labels, mode, params):
dnn_activation_fn = params.get("dnn_activation_fn")
dnn_dropout = params.get("dnn_dropout")
gradient_clip_norm = params.get("gradient_clip_norm")
- num_ps_replicas = params.get("num_ps_replicas", 0)
+ num_ps_replicas = config.num_ps_replicas if config else 0
embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
if not linear_feature_columns and not dnn_feature_columns:
@@ -728,10 +729,6 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
"must be defined.")
self._dnn_hidden_units = dnn_hidden_units
self._enable_centered_bias = enable_centered_bias
- if config is None:
- config = estimator.BaseEstimator._Config() # pylint: disable=protected-access
- logging.info("Using default config.")
-
head = head_lib._multi_class_head( # pylint: disable=protected-access
n_classes=n_classes,
weight_column_name=weight_column_name,
@@ -751,7 +748,6 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
"dnn_activation_fn": dnn_activation_fn,
"dnn_dropout": dnn_dropout,
"gradient_clip_norm": gradient_clip_norm,
- "num_ps_replicas": config.num_ps_replicas if config else 0,
"embedding_lr_multipliers": embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
@@ -1103,10 +1099,6 @@ class DNNLinearCombinedRegressor(evaluable.Evaluable, trainable.Trainable):
if not self._feature_columns:
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"must be defined.")
- if config is None:
- config = estimator.BaseEstimator._Config() # pylint: disable=protected-access
- logging.info("Using default config.")
-
head = head_lib._regression_head( # pylint: disable=protected-access
weight_column_name=weight_column_name,
label_dimension=label_dimension,
@@ -1126,7 +1118,6 @@ class DNNLinearCombinedRegressor(evaluable.Evaluable, trainable.Trainable):
"dnn_activation_fn": dnn_activation_fn,
"dnn_dropout": dnn_dropout,
"gradient_clip_norm": gradient_clip_norm,
- "num_ps_replicas": config.num_ps_replicas if config else 0,
"embedding_lr_multipliers": embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
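
The deleted `if config is None` blocks here (and in dnn.py and linear.py) were redundant: the wrapped `estimator.Estimator` applies the same fallback itself, so the default config and its "Using default config." log line now come from a single place. Roughly, the guard that remains inside `BaseEstimator.__init__` looks like this (paraphrased, not a verbatim excerpt):

```python
# Paraphrase of the fallback in BaseEstimator.__init__; removing the
# per-estimator copies above does not change behavior.
if config is None:
    self._config = BaseEstimator._Config()
    logging.info("Using default config.")
else:
    self._config = config
```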
diff --git a/tensorflow/contrib/learn/python/learn/estimators/linear.py b/tensorflow/contrib/learn/python/learn/estimators/linear.py
index 545a156e56..45e430717f 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/linear.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/linear.py
@@ -82,7 +82,7 @@ def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables[bias_column] = [bias_variable]
-def _linear_model_fn(features, labels, mode, params):
+def _linear_model_fn(features, labels, mode, params, config=None):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
@@ -105,6 +105,7 @@ def _linear_model_fn(features, labels, mode, params):
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires all feature columns are
sparse and use the 'sum' combiner.
+ config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
@@ -116,7 +117,7 @@ def _linear_model_fn(features, labels, mode, params):
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
gradient_clip_norm = params.get("gradient_clip_norm", None)
- num_ps_replicas = params.get("num_ps_replicas", 0)
+ num_ps_replicas = config.num_ps_replicas if config else 0
joint_weights = params.get("joint_weights", False)
if not isinstance(features, dict):
@@ -383,9 +384,6 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
self._optimizer = _get_default_optimizer(feature_columns)
if optimizer:
self._optimizer = _get_optimizer(optimizer)
- if config is None:
- config = estimator.BaseEstimator._Config() # pylint: disable=protected-access
- logging.info("Using default config.")
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
@@ -420,7 +418,6 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
- "num_ps_replicas": config.num_ps_replicas if config else 0,
"joint_weights": _joint_weight,
})
@@ -666,9 +663,6 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
self._optimizer = _get_default_optimizer(feature_columns)
if optimizer:
self._optimizer = _get_optimizer(optimizer)
- if config is None:
- config = estimator.BaseEstimator._Config() # pylint: disable=protected-access
- logging.info("Using default config.")
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
@@ -703,7 +697,6 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
- "num_ps_replicas": config.num_ps_replicas if config else 0,
"joint_weights": _joint_weights,
})
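
For context on what `num_ps_replicas` controls: inside these model_fns it bounds how many ways large variables are split across parameter servers via a min-max partitioner. A sketch of that use, paraphrased from the surrounding code in dnn.py (treat it as illustrative rather than a verbatim excerpt):

```python
from tensorflow.python.ops import partitioned_variables

# With a config present, num_ps_replicas caps the partition count; with
# config=None it falls back to 0, i.e. no partitioning at all.
num_ps_replicas = config.num_ps_replicas if config else 0
input_layer_partitioner = partitioned_variables.min_max_variable_partitioner(
    max_partitions=num_ps_replicas,
    min_slice_size=64 << 20)  # keep each variable slice at least 64 MB
```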