aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2016-06-30 17:54:25 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2016-06-30 19:04:03 -0700
commit1bcc32c99ca1fe8d08d02a27d9570233fdb92472 (patch)
tree77788e7a5d9de281dd5c2b4aa8c56684935dfb2e
parent3ff5ce17c4ab4578ba5e96fd823fbaa9be310e01 (diff)
Use learn.DNNClassifier for Iris examples. Use default graph and existing global step (if there is one) in Estimator, because they may have been created earlier when constructing an optimizer, as in the example iris_custom_decay_dnn.
Change: 126369635
-rw-r--r--tensorflow/examples/skflow/BUILD4
-rwxr-xr-xtensorflow/examples/skflow/examples_test.sh4
-rw-r--r--tensorflow/examples/skflow/iris_custom_decay_dnn.py43
-rw-r--r--tensorflow/examples/skflow/iris_run_config.py37
-rw-r--r--tensorflow/examples/skflow/iris_val_based_early_stopping.py8
-rw-r--r--tensorflow/examples/skflow/iris_with_pipeline.py31
6 files changed, 79 insertions, 48 deletions
diff --git a/tensorflow/examples/skflow/BUILD b/tensorflow/examples/skflow/BUILD
index 5d6eae8745..7cac13df98 100644
--- a/tensorflow/examples/skflow/BUILD
+++ b/tensorflow/examples/skflow/BUILD
@@ -231,7 +231,11 @@ sh_test(
data = [
":boston",
":iris",
+ ":iris_custom_decay_dnn",
":iris_custom_model",
+ ":iris_run_config",
+ ":iris_val_based_early_stopping",
+ ":iris_with_pipeline",
":text_classification",
":text_classification_builtin_rnn_model",
":text_classification_character_cnn",
diff --git a/tensorflow/examples/skflow/examples_test.sh b/tensorflow/examples/skflow/examples_test.sh
index da6b35c9bb..f4010c915e 100755
--- a/tensorflow/examples/skflow/examples_test.sh
+++ b/tensorflow/examples/skflow/examples_test.sh
@@ -49,6 +49,10 @@ function test() {
test boston
test iris
test iris_custom_model
+test iris_custom_decay_dnn
+test iris_run_config
+test iris_val_based_early_stopping
+test iris_with_pipeline
test text_classification --test_with_fake_data
test text_classification_builtin_rnn_model --test_with_fake_data
test text_classification_cnn --test_with_fake_data
diff --git a/tensorflow/examples/skflow/iris_custom_decay_dnn.py b/tensorflow/examples/skflow/iris_custom_decay_dnn.py
index c1e7d22d53..1ce6a830e4 100644
--- a/tensorflow/examples/skflow/iris_custom_decay_dnn.py
+++ b/tensorflow/examples/skflow/iris_custom_decay_dnn.py
@@ -17,24 +17,29 @@ from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
-
import tensorflow as tf
-iris = datasets.load_iris()
-X_train, X_test, y_train, y_test = train_test_split(iris.data,
- iris.target,
- test_size=0.2,
- random_state=42)
-# setup exponential decay function
-def exp_decay(global_step):
- return tf.train.exponential_decay(
- learning_rate=0.1, global_step=global_step,
- decay_steps=100, decay_rate=0.001)
-
-# use customized decay function in learning_rate
-optimizer = tf.train.AdagradOptimizer(learning_rate=exp_decay)
-classifier = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10],
- n_classes=3,
- optimizer=optimizer)
-classifier.fit(X_train, y_train, steps=800)
-score = metrics.accuracy_score(y_test, classifier.predict(X_test))
+
+def optimizer_exp_decay():
+ global_step = tf.contrib.framework.get_or_create_global_step()
+ learning_rate = tf.train.exponential_decay(
+ learning_rate=0.1, global_step=global_step,
+ decay_steps=100, decay_rate=0.001)
+ return tf.train.AdagradOptimizer(learning_rate=learning_rate)
+
+def main(unused_argv):
+ iris = datasets.load_iris()
+ x_train, x_test, y_train, y_test = train_test_split(
+ iris.data, iris.target, test_size=0.2, random_state=42)
+
+ classifier = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10],
+ n_classes=3,
+ optimizer=optimizer_exp_decay)
+
+ classifier.fit(x_train, y_train, steps=800)
+ score = metrics.accuracy_score(y_test, classifier.predict(x_test))
+ print('Accuracy: {0:f}'.format(score))
+
+
+if __name__ == '__main__':
+ tf.app.run()
diff --git a/tensorflow/examples/skflow/iris_run_config.py b/tensorflow/examples/skflow/iris_run_config.py
index dff0daf9e8..c678c7c738 100644
--- a/tensorflow/examples/skflow/iris_run_config.py
+++ b/tensorflow/examples/skflow/iris_run_config.py
@@ -16,24 +16,31 @@ from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics, cross_validation
+import tensorflow as tf
-from tensorflow.contrib import learn
+def main(unused_argv):
+ # Load dataset.
+ iris = datasets.load_iris()
+ x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+ iris.data, iris.target, test_size=0.2, random_state=42)
-# Load dataset.
-iris = datasets.load_iris()
-X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
- test_size=0.2, random_state=42)
+  # You can define your configurations by providing a RunConfig object to
+ # estimator to control session configurations, e.g. num_cores
+ # and gpu_memory_fraction
+ run_config = tf.contrib.learn.estimators.RunConfig(
+ num_cores=3, gpu_memory_fraction=0.6)
-# You can define you configurations by providing a RunConfig object to
-# estimator to control session configurations, e.g. num_cores and gpu_memory_fraction
-run_config = learn.estimators.RunConfig(num_cores=3, gpu_memory_fraction=0.6)
+ # Build 3 layer DNN with 10, 20, 10 units respectively.
+ classifier = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10],
+ n_classes=3,
+ config=run_config)
-# Build 3 layer DNN with 10, 20, 10 units respectively.
-classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
- n_classes=3, steps=200, config=run_config)
+ # Fit and predict.
+ classifier.fit(x_train, y_train, steps=200)
+ score = metrics.accuracy_score(y_test, classifier.predict(x_test))
+ print('Accuracy: {0:f}'.format(score))
-# Fit and predict.
-classifier.fit(X_train, y_train)
-score = metrics.accuracy_score(y_test, classifier.predict(X_test))
-print('Accuracy: {0:f}'.format(score))
+
+if __name__ == '__main__':
+ tf.app.run()
diff --git a/tensorflow/examples/skflow/iris_val_based_early_stopping.py b/tensorflow/examples/skflow/iris_val_based_early_stopping.py
index 72e0595544..05dfa96a07 100644
--- a/tensorflow/examples/skflow/iris_val_based_early_stopping.py
+++ b/tensorflow/examples/skflow/iris_val_based_early_stopping.py
@@ -34,21 +34,23 @@ def main(unused_argv):
x_val, y_val, early_stopping_rounds=200)
# classifier with early stopping on training data
- classifier1 = learn.TensorFlowDNNClassifier(
+ classifier1 = learn.DNNClassifier(
hidden_units=[10, 20, 10], n_classes=3, model_dir='/tmp/iris_model/')
classifier1.fit(x=x_train, y=y_train, steps=2000)
score1 = metrics.accuracy_score(y_test, classifier1.predict(x_test))
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
- classifier2 = learn.TensorFlowDNNClassifier(
+ classifier2 = learn.DNNClassifier(
hidden_units=[10, 20, 10], n_classes=3, model_dir='/tmp/iris_model_val/',
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
score2 = metrics.accuracy_score(y_test, classifier2.predict(x_test))
# In many applications, the score is improved by using early stopping
- print(score2 > score1)
+ print('score1: ', score1)
+ print('score2: ', score2)
+ print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
diff --git a/tensorflow/examples/skflow/iris_with_pipeline.py b/tensorflow/examples/skflow/iris_with_pipeline.py
index 3ba5739250..5535cd9e3b 100644
--- a/tensorflow/examples/skflow/iris_with_pipeline.py
+++ b/tensorflow/examples/skflow/iris_with_pipeline.py
@@ -20,22 +20,31 @@ from sklearn.datasets import load_iris
from sklearn import cross_validation
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
+import tensorflow as tf
+
from tensorflow.contrib import learn
-iris = load_iris()
-X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
- test_size=0.2, random_state=42)
-# It's useful to scale to ensure Stochastic Gradient Descent will do the right thing
-scaler = StandardScaler()
+def main(unused_argv):
+ iris = load_iris()
+ x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+ iris.data, iris.target, test_size=0.2, random_state=42)
+
+ # It's useful to scale to ensure Stochastic Gradient Descent
+ # will do the right thing.
+ scaler = StandardScaler()
+
+ # DNN classifier
+ classifier = learn.DNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
-# DNN classifier
-DNNclassifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3, steps=200)
+ pipeline = Pipeline([('scaler', scaler),
+ ('DNNclassifier', classifier)])
-pipeline = Pipeline([('scaler', scaler), ('DNNclassifier', DNNclassifier)])
+ pipeline.fit(x_train, y_train, DNNclassifier__steps=200)
-pipeline.fit(X_train, y_train)
+ score = accuracy_score(y_test, pipeline.predict(x_test))
+ print('Accuracy: {0:f}'.format(score))
-score = accuracy_score(y_test, pipeline.predict(X_test))
-print('Accuracy: {0:f}'.format(score))
+if __name__ == '__main__':
+ tf.app.run()