aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <nobody@tensorflow.org>2016-05-31 16:22:39 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2016-05-31 17:36:59 -0700
commit424eea4d523712cc9735d4919ea09713c78007e8 (patch)
tree65645840c1b84b1c1d3e2be759d6e1fd79513b56
parente456991cba729375023a8081b78970bf10bf3ccb (diff)
Remove train, and add input_fn to fit (to make it consistent with evaluate).
Change: 123701575
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/base.py5
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/classifier_test.py2
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/dnn.py6
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py24
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/dnn_test.py6
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/estimator.py99
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/estimator_test.py15
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/linear.py8
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/linear_test.py28
9 files changed, 114 insertions, 79 deletions
diff --git a/tensorflow/contrib/learn/python/learn/estimators/base.py b/tensorflow/contrib/learn/python/learn/estimators/base.py
index 8e2bf70c90..88df90cbdb 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/base.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/base.py
@@ -319,7 +319,7 @@ class TensorFlowEstimator(estimator.Estimator):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['_config']
- _config = config
+ _config = config # pylint: disable=unused-variable,invalid-name
with gfile.Open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
@@ -390,15 +390,18 @@ class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
"""TensorFlow Base Transformer class."""
def transform(self, X): # pylint: disable=invalid-name
+ # pylint: disable=invalid-name
"""Transform X using trained transformer."""
return(super(TensorFlowBaseTransformer, self).predict(
X, axis=1, batch_size=None))
def fit(self, X, y=None, monitor=None, logdir=None):
+ # pylint: disable=invalid-name
"""Fit a transformer."""
return(super(TensorFlowBaseTransformer, self).fit(
X, y, monitors=None, logdir=None))
def fit_transform(self, X, y=None, monitor=None, logdir=None):
+ # pylint: disable=invalid-name
"""Fit transformer and transform X using trained transformer."""
return self.fit(X, y, monitor=None, logdir=None).transform(X)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/classifier_test.py b/tensorflow/contrib/learn/python/learn/estimators/classifier_test.py
index a2efcd1f03..ca8d39c2a7 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/classifier_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/classifier_test.py
@@ -64,7 +64,7 @@ class ClassifierTest(tf.test.TestCase):
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
- est.train(input_fn=iris_input_fn, steps=100)
+ est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)
self.assertEqual(predictions.shape[0], iris.target.shape[0])
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn.py b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
index cf952f6d8b..9673c3f36e 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
@@ -49,7 +49,7 @@ class DNNClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
# Input builders
def input_fn_train: # returns X, Y
pass
- estimator.train(input_fn_train)
+ estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns X, Y
pass
@@ -130,7 +130,7 @@ class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
# Input builders
def input_fn_train: # returns X, Y
pass
- estimator.train(input_fn_train)
+ estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns X, Y
pass
@@ -246,6 +246,7 @@ class TensorFlowDNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
verbose=verbose)
def _model_fn(self, X, y): # pylint: disable=invalid-name
+ # pylint: disable=invalid-name
return models.get_dnn_model(self.hidden_units,
models.logistic_regression,
dropout=self.dropout)(X, y)
@@ -320,6 +321,7 @@ class TensorFlowDNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
verbose=verbose)
def _model_fn(self, X, y): # pylint: disable=invalid-name
+ # pylint: disable=invalid-name
return models.get_dnn_model(self.hidden_units,
models.linear_regression,
dropout=self.dropout)(X, y)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
index 87093d62d2..0d9d528588 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
@@ -63,7 +63,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
- classifier.train(_iris_input_fn, steps=100)
+ classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
@@ -103,7 +103,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn, steps=100)
+ classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
@@ -126,7 +126,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
- classifier.train(_iris_input_fn, steps=100)
+ classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
@@ -159,7 +159,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=100)
+ classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval,
steps=100)
# If there is no weight column, model should learn y=Not(x). All examples in
@@ -187,7 +187,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=100)
+ classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=100)
# If weight column is ignored, then accuracy should be 0.25. If it's not
# ignored, then it should be greater than 0.6.
@@ -209,7 +209,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_hidden_units=[3, 3],
dnn_optimizer=tf.train.AdagradOptimizer(learning_rate=0.1))
- classifier.train(_iris_input_fn, steps=100)
+ classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
@@ -229,7 +229,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
- classifier.train(_iris_input_fn, steps=100)
+ classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
@@ -250,7 +250,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=100)
+ classifier.fit(input_fn=_input_fn_train, steps=100)
probs = classifier.predict_proba(input_fn=_input_fn_predict)
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.01)
classes = classifier.predict(input_fn=_input_fn_predict)
@@ -270,7 +270,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=100)
+ classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn_train,
steps=100,
@@ -297,7 +297,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=500)
+ classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
@@ -316,7 +316,7 @@ class DNNLinearCombinedClassifierTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=500)
+ classifier.fit(input_fn=_input_fn_train, steps=500)
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
@@ -340,7 +340,7 @@ class DNNLinearCombinedRegressorTest(tf.test.TestCase):
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
- classifier.train(input_fn=_input_fn_train, steps=100)
+ classifier.fit(input_fn=_input_fn_train, steps=100)
classifier.evaluate(input_fn=_input_fn_train, steps=1)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn_test.py b/tensorflow/contrib/learn/python/learn/estimators/dnn_test.py
index fb357a042e..0a0c6a38e0 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn_test.py
@@ -41,7 +41,7 @@ class DNNClassifierTest(tf.test.TestCase):
feature_columns=cont_features,
hidden_units=[3, 3])
- classifier.train(_iris_input_fn, steps=1000)
+ classifier.fit(input_fn=_iris_input_fn, steps=1000)
classifier.evaluate(input_fn=_iris_input_fn, steps=100)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertGreater(scores['accuracy/mean'], 0.6)
@@ -57,7 +57,7 @@ class DNNRegressorTest(tf.test.TestCase):
regressor = tf.contrib.learn.DNNRegressor(feature_columns=cont_features,
hidden_units=[3, 3])
- regressor.train(_iris_input_fn, steps=1000)
+ regressor.fit(input_fn=_iris_input_fn, steps=1000)
regressor.evaluate(input_fn=_iris_input_fn, steps=100)
@@ -72,7 +72,7 @@ class InferedColumnTest(tf.test.TestCase):
def testTrain(self):
est = tf.contrib.learn.DNNRegressor(hidden_units=[3, 3])
- est.train(input_fn=boston_input_fn, steps=1)
+ est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator.py b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
index 9707bbb718..2ce17fe43d 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
@@ -125,7 +125,9 @@ class BaseEstimator(sklearn.BaseEstimator):
self._graph = None
- def fit(self, x, y, steps, batch_size=32, monitors=None):
+ def fit(
+ self, x=None, y=None, input_fn=None, steps=None, batch_size=32,
+ monitors=None):
"""Trains a model given training data X and y.
Args:
@@ -135,38 +137,36 @@ class BaseEstimator(sklearn.BaseEstimator):
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
- steps: number of steps to train model for.
- batch_size: minibatch size to use on the input, defaults to 32.
+ input_fn: Input function. If set, `x` and `y` must be `None`.
+ steps: Number of steps for which to train model. If `None`, train forever.
+ batch_size: minibatch size to use on the input, defaults to 32. Ignored if
+ `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
- Final loss.
+ `self`, for chaining.
+
+ Raises:
+ ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
"""
- input_fn, feed_fn = _get_input_fn(x, y, batch_size)
- return self._train_model(input_fn=input_fn,
+ feed_fn = None
+ if input_fn is None:
+ if x is None:
+ raise ValueError('Either x or input_fn must be provided.')
+ input_fn, feed_fn = _get_input_fn(x, y, batch_size)
+ elif (x is not None) or (y is not None):
+ raise ValueError('Can not provide both input_fn and either of x and y.')
+ loss = self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors)
+ logging.info('Loss for final step: %s.', loss)
+ return self
- # TODO(ptucker,ipolosukhin): Consider returning self and saving loss in
- # attribute. Ditto for evaluate.
- def train(self, input_fn, steps, monitors=None):
- """Trains a model given input builder function.
-
- Args:
- input_fn: Input builder function, returns tuple of dicts or
- dict and Tensor.
- steps: number of steps to train model for.
- monitors: List of `BaseMonitor` subclass instances. Used for callbacks
- inside the training loop.
-
- Returns:
- Final loss.
- """
- return self._train_model(input_fn=input_fn, steps=steps, monitors=monitors)
-
- def partial_fit(self, x, y, steps=1, batch_size=32, monitors=None):
+ def partial_fit(
+ self, x=None, y=None, input_fn=None, steps=1, batch_size=32,
+ monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
@@ -184,19 +184,32 @@ class BaseEstimator(sklearn.BaseEstimator):
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
- steps: number of steps to train model for.
- batch_size: minibatch size to use on the input, defaults to 32.
+ input_fn: Input function. If set, `x` and `y` must be `None`.
+ steps: Number of steps for which to train model. If `None`, train forever.
+ batch_size: minibatch size to use on the input, defaults to 32. Ignored if
+ `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
- Final loss.
+ `self`, for chaining.
+
+ Raises:
+ ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
"""
- input_fn, feed_fn = _get_input_fn(x, y, batch_size)
- return self._train_model(input_fn=input_fn,
+ feed_fn = None
+ if input_fn is None:
+ if x is None:
+ raise ValueError('Either x or input_fn must be provided.')
+ input_fn, feed_fn = _get_input_fn(x, y, batch_size)
+ elif (x is not None) or (y is not None):
+ raise ValueError('Can not provide both input_fn and either of x and y.')
+ loss = self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors)
+ logging.info('Loss for final step: %s.', loss)
+ return self
def evaluate(self,
x=None,
@@ -212,12 +225,12 @@ class BaseEstimator(sklearn.BaseEstimator):
Args:
x: features.
y: targets.
- input_fn: Input function. If set, x and y must be None.
+ input_fn: Input function. If set, `x` and `y` must be `None`.
feed_fn: Function creating a feed dict every time it is called. Called
once per iteration.
- batch_size: minibatch size to use on the input, defaults to 32. Ignored
- if input_fn is set.
- steps: Number of steps to evalute for.
+ batch_size: minibatch size to use on the input, defaults to 32. Ignored if
+ `input_fn` is provided.
steps: Number of steps for which to evaluate model. If `None`, evaluate forever.
metrics: Dict of metric ops to run. If None, the default metric functions
are used; if {}, no metrics are used.
name: Name of the evaluation if user needs to run multiple evaluation on
@@ -227,13 +240,17 @@ class BaseEstimator(sklearn.BaseEstimator):
Returns `dict` with evaluation results.
Raises:
- ValueError: If x or y are not None while input_fn or feed_fn is not None.
+ ValueError: If `x` or `y` are not `None` while `input_fn` or `feed_fn` is
+ not `None`.
"""
- if (x is not None or y is not None) and input_fn is not None:
- raise ValueError('Either x and y or input_fn must be None.')
if input_fn is None:
- assert x is not None
+ if x is None:
+ raise ValueError('Either x or input_fn must be provided.')
+ if feed_fn is not None:
+ raise ValueError('Cannot provide both x and feed_fn.')
input_fn, feed_fn = _get_predict_input_fn(x, y, batch_size)
+ elif (x is not None) or (y is not None):
+ raise ValueError('Can not provide both input_fn and either of x and y.')
return self._evaluate_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
@@ -496,8 +513,14 @@ class BaseEstimator(sklearn.BaseEstimator):
outputs=None):
# Converts inputs into tf.DataFrame / tf.Series.
batch_size = -1 if batch_size is None else batch_size
- if x is not None:
+ if input_fn is None:
+ if x is None:
+ raise ValueError('Either x or input_fn must be provided.')
+ if feed_fn is not None:
+ raise ValueError('Cannot provide both x and feed_fn.')
input_fn, feed_fn = _get_predict_input_fn(x, None, batch_size)
+ elif x is not None:
+ raise ValueError('Can not provide both input_fn and x.')
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
index 1e320bbd7e..6497b0b82d 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
@@ -179,27 +179,27 @@ class EstimatorTest(tf.test.TestCase):
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_fn)
- est.train(input_fn=iris_input_fn, steps=100)
+ est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)['class']
self.assertEqual(predictions.shape[0], iris.target.shape[0])
def testTrainInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
- est.train(input_fn=boston_input_fn, steps=1)
+ est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredict(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
- est.train(input_fn=boston_input_fn, steps=1)
+ est.fit(input_fn=boston_input_fn, steps=1)
output = est.predict(boston.data)
self.assertEqual(output.shape[0], boston.target.shape[0])
def testPredictFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
- est.train(input_fn=boston_input_fn, steps=1)
+ est.fit(input_fn=boston_input_fn, steps=1)
output = est.predict(input_fn=boston_input_fn)
self.assertEqual(output.shape[0], boston.target.shape[0])
@@ -207,14 +207,13 @@ class EstimatorTest(tf.test.TestCase):
def other_input_fn():
return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
- est.train(input_fn=boston_input_fn, steps=1)
+ est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
- est.train(input_fn=other_input_fn, steps=1)
+ est.fit(input_fn=other_input_fn, steps=1)
def testMonitors(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
- est.train(input_fn=boston_input_fn, steps=21,
- monitors=[CheckCallsMonitor()])
+ est.fit(input_fn=boston_input_fn, steps=21, monitors=[CheckCallsMonitor()])
if __name__ == '__main__':
diff --git a/tensorflow/contrib/learn/python/learn/estimators/linear.py b/tensorflow/contrib/learn/python/learn/estimators/linear.py
index 27543a51ea..073aaf4e5d 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/linear.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/linear.py
@@ -46,8 +46,8 @@ class LinearClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
...
def input_fn_eval: # returns X, Y
...
- estimator.train(input_fn_train)
- estimator.evaluate(input_fn_eval)
+ estimator.fit(input_fn=input_fn_train)
+ estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x)
```
@@ -124,8 +124,8 @@ class LinearRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
...
def input_fn_eval: # returns X, Y
...
- estimator.train(input_fn_train)
- estimator.evaluate(input_fn_eval)
+ estimator.fit(input_fn=input_fn_train)
+ estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x)
```
diff --git a/tensorflow/contrib/learn/python/learn/estimators/linear_test.py b/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
index 1126c6250e..88eb20b347 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
@@ -41,9 +41,10 @@ class LinearClassifierTest(tf.test.TestCase):
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
- loss1 = classifier.train(input_fn, steps=100)
- loss2 = classifier.train(input_fn, steps=200)
-
+ classifier.fit(input_fn=input_fn, steps=100)
+ loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
+ classifier.fit(input_fn=input_fn, steps=200)
+ loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
@@ -64,8 +65,12 @@ class LinearClassifierTest(tf.test.TestCase):
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
- loss_no_reg = classifier_no_reg.train(input_fn, steps=100)
- loss_with_reg = classifier_with_reg.train(input_fn, steps=100)
+ loss_no_reg = classifier_no_reg.fit(
+ input_fn=input_fn, steps=100).evaluate(
+ input_fn=input_fn, steps=1)['loss']
+ loss_with_reg = classifier_with_reg.fit(
+ input_fn=input_fn, steps=100).evaluate(
+ input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
@@ -80,7 +85,8 @@ class LinearClassifierTest(tf.test.TestCase):
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
- loss = classifier.train(input_fn, steps=100)
+ classifier.fit(input_fn=input_fn, steps=100)
+ loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
def testEval(self):
@@ -101,7 +107,7 @@ class LinearClassifierTest(tf.test.TestCase):
feature_columns=[age, language])
# Evaluate on trained model
- classifier.train(input_fn, steps=100)
+ classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=2)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
@@ -127,8 +133,10 @@ class LinearRegressorTest(tf.test.TestCase):
classifier = tf.contrib.learn.LinearRegressor(
feature_columns=[age, language])
- loss1 = classifier.train(input_fn, steps=100)
- loss2 = classifier.train(input_fn, steps=200)
+ classifier.fit(input_fn=input_fn, steps=100)
+ loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
+ classifier.fit(input_fn=input_fn, steps=200)
+ loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
@@ -145,7 +153,7 @@ class InferedColumnTest(tf.test.TestCase):
def testTrain(self):
est = tf.contrib.learn.LinearRegressor()
- est.train(input_fn=boston_input_fn, steps=1)
+ est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)