author    Justine Tunney <jart@google.com>  2016-12-29 22:46:24 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2016-12-29 23:06:59 -0800
commit    e121667dc609de978a223c56ee906368d2c4ceef (patch)
tree      7d4e1f1e1b4fd469487872c0cd34ddace5ac570c /tensorflow/examples/learn
parent    7815fcba7767aa1eb3196c5861e174f8b3c43bab (diff)
Remove so many more hourglass imports
Change: 143230429
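The pattern behind this change: instead of importing contrib submodules directly into the local namespace (the "hourglass" shape of from tensorflow.contrib import learn), call sites either spell out the full tf.contrib path or bind a module-level alias next to the imports. A minimal sketch of both styles against the TensorFlow 1.x contrib API of this era:

    import tensorflow as tf

    # Before (hourglass import), the submodule was bound at import time:
    #   from tensorflow.contrib import learn

    # After, style 1: fully qualified call sites, as in boston.py and iris.py.
    iris = tf.contrib.learn.datasets.load_dataset('iris')

    # After, style 2: a module-level alias near the imports, as in mnist.py.
    learn = tf.contrib.learn
    feature_columns = learn.infer_real_valued_columns_from_input(iris.data)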
Diffstat (limited to 'tensorflow/examples/learn')
-rw-r--r--  tensorflow/examples/learn/BUILD                                 |   6
-rw-r--r--  tensorflow/examples/learn/boston.py                             |  12
-rw-r--r--  tensorflow/examples/learn/hdf5_classification.py                |   5
-rw-r--r--  tensorflow/examples/learn/iris.py                               |   9
-rw-r--r--  tensorflow/examples/learn/iris_custom_model.py                  |  24
-rw-r--r--  tensorflow/examples/learn/iris_val_based_early_stopping.py      |  11
-rw-r--r--  tensorflow/examples/learn/iris_with_pipeline.py                 |   9
-rw-r--r--  tensorflow/examples/learn/mnist.py                              |  41
-rw-r--r--  tensorflow/examples/learn/multiple_gpu.py                       |  24
-rwxr-xr-x  tensorflow/examples/learn/resnet.py                             | 108
-rw-r--r--  tensorflow/examples/learn/text_classification.py                |  38
-rw-r--r--  tensorflow/examples/learn/text_classification_character_cnn.py  |  38
-rw-r--r--  tensorflow/examples/learn/text_classification_character_rnn.py  |  27
-rw-r--r--  tensorflow/examples/learn/text_classification_cnn.py            |  36
14 files changed, 227 insertions(+), 161 deletions(-)
diff --git a/tensorflow/examples/learn/BUILD b/tensorflow/examples/learn/BUILD
index 99ec37993d..d6dae9a9f8 100644
--- a/tensorflow/examples/learn/BUILD
+++ b/tensorflow/examples/learn/BUILD
@@ -26,6 +26,7 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/learn",
+ "//third_party/py/numpy",
],
)
@@ -118,6 +119,7 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/learn",
+ "//third_party/py/numpy",
],
)
@@ -128,6 +130,7 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/learn",
+ "//third_party/py/numpy",
],
)
@@ -138,6 +141,7 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/learn",
+ "//third_party/py/numpy",
],
)
@@ -148,6 +152,7 @@ py_binary(
deps = [
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/learn",
+ "//third_party/py/numpy",
],
)
@@ -171,6 +176,7 @@ py_binary(
"//tensorflow/contrib/layers:layers_py",
"//tensorflow/contrib/learn",
"//tensorflow/examples/tutorials/mnist:input_data",
+ "//third_party/py/numpy",
],
)
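Each hunk above also declares the numpy dependency explicitly instead of relying on it arriving transitively through other deps. A representative py_binary rule after the change might read as follows (the target and source names here are illustrative, not taken from the BUILD file):

    py_binary(
        name = "mnist",  # hypothetical target name
        srcs = ["mnist.py"],
        deps = [
            "//tensorflow:tensorflow_py",
            "//tensorflow/contrib/learn",
            "//third_party/py/numpy",  # now declared directly
        ],
    )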
diff --git a/tensorflow/examples/learn/boston.py b/tensorflow/examples/learn/boston.py
index 10b6305443..5d5ddff564 100644
--- a/tensorflow/examples/learn/boston.py
+++ b/tensorflow/examples/learn/boston.py
@@ -20,12 +20,11 @@ from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
-from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset
- boston = learn.datasets.load_dataset('boston')
+ boston = tf.contrib.learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
@@ -37,8 +36,9 @@ def main(unused_argv):
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
- feature_columns = learn.infer_real_valued_columns_from_input(x_train)
- regressor = learn.DNNRegressor(
+ feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
+ x_train)
+ regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
@@ -46,7 +46,8 @@ def main(unused_argv):
# Predict and score
y_predicted = list(
- regressor.predict(scaler.transform(x_test), as_iterable=True))
+ regressor.predict(
+ scaler.transform(x_test), as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
@@ -54,4 +55,3 @@ def main(unused_argv):
if __name__ == '__main__':
tf.app.run()
-
diff --git a/tensorflow/examples/learn/hdf5_classification.py b/tensorflow/examples/learn/hdf5_classification.py
index e9c0cf63d7..db37500246 100644
--- a/tensorflow/examples/learn/hdf5_classification.py
+++ b/tensorflow/examples/learn/hdf5_classification.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
@@ -22,9 +21,10 @@ import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import learn
import h5py # pylint: disable=g-bad-import-order
+learn = tf.contrib.learn
+
def main(unused_argv):
# Load dataset.
@@ -57,5 +57,6 @@ def main(unused_argv):
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
+
if __name__ == '__main__':
tf.app.run()
diff --git a/tensorflow/examples/learn/iris.py b/tensorflow/examples/learn/iris.py
index 957c91c2b3..ad01f3544a 100644
--- a/tensorflow/examples/learn/iris.py
+++ b/tensorflow/examples/learn/iris.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
@@ -20,18 +19,18 @@ from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
- iris = learn.datasets.load_dataset('iris')
+ iris = tf.contrib.learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
- feature_columns = learn.infer_real_valued_columns_from_input(x_train)
- classifier = learn.DNNClassifier(
+ feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
+ x_train)
+ classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
diff --git a/tensorflow/examples/learn/iris_custom_model.py b/tensorflow/examples/learn/iris_custom_model.py
index bfe5238eba..31fb88954b 100644
--- a/tensorflow/examples/learn/iris_custom_model.py
+++ b/tensorflow/examples/learn/iris_custom_model.py
@@ -20,8 +20,9 @@ from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import layers
-from tensorflow.contrib import learn
+
+layers = tf.contrib.layers
+learn = tf.contrib.learn
def my_model(features, target):
@@ -34,9 +35,11 @@ def my_model(features, target):
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
- features = layers.stack(features, layers.fully_connected, [10, 20, 10],
- normalizer_fn=normalizer_fn,
- normalizer_params=normalizer_params)
+ features = layers.stack(
+ features,
+ layers.fully_connected, [10, 20, 10],
+ normalizer_fn=normalizer_fn,
+ normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
@@ -44,12 +47,15 @@ def my_model(features, target):
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
- 'prob': tf.nn.softmax(logits)}, loss, train_op)
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def main(unused_argv):
@@ -61,7 +67,9 @@ def main(unused_argv):
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
- p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+ p['class'] for p in classifier.predict(
+ x_test, as_iterable=True)
+ ]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
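my_model follows the contrib Estimator contract of that era: a model_fn(features, target) returning a (predictions, loss, train_op) triple, which the hunks above merely re-wrap. A skeletal version of that contract, assuming the same 1.x contrib API (layer width and class count are illustrative):

    import tensorflow as tf

    layers = tf.contrib.layers
    learn = tf.contrib.learn

    def model_fn(features, target):
        # One-hot encode the integer target for the softmax loss.
        target = tf.one_hot(target, 3, 1, 0)
        logits = layers.fully_connected(features, 3, activation_fn=None)
        loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
        train_op = layers.optimize_loss(
            loss,
            tf.contrib.framework.get_global_step(),
            optimizer='Adagrad',
            learning_rate=0.1)
        # Predictions dict, loss, and training op: the legacy triple.
        return {'class': tf.argmax(logits, 1)}, loss, train_op

    classifier = learn.Estimator(model_fn=model_fn)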
diff --git a/tensorflow/examples/learn/iris_val_based_early_stopping.py b/tensorflow/examples/learn/iris_val_based_early_stopping.py
index 3d0129c735..991d1831d7 100644
--- a/tensorflow/examples/learn/iris_val_based_early_stopping.py
+++ b/tensorflow/examples/learn/iris_val_based_early_stopping.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
@@ -25,7 +24,7 @@ from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
-from tensorflow.contrib import learn
+learn = tf.contrib.learn
def clean_folder(folder):
@@ -52,7 +51,9 @@ def main(unused_argv):
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
- hidden_units=[10, 20, 10], n_classes=3, model_dir=model_dir)
+ hidden_units=[10, 20, 10],
+ n_classes=3,
+ model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
@@ -64,7 +65,9 @@ def main(unused_argv):
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
- hidden_units=[10, 20, 10], n_classes=3, model_dir=model_dir,
+ hidden_units=[10, 20, 10],
+ n_classes=3,
+ model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
diff --git a/tensorflow/examples/learn/iris_with_pipeline.py b/tensorflow/examples/learn/iris_with_pipeline.py
index 94cfbceee0..7ba958d85b 100644
--- a/tensorflow/examples/learn/iris_with_pipeline.py
+++ b/tensorflow/examples/learn/iris_with_pipeline.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Example of DNNClassifier for Iris plant dataset, with pipeline."""
from __future__ import absolute_import
@@ -25,7 +24,7 @@ from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
-from tensorflow.contrib import learn
+learn = tf.contrib.learn
def main(unused_argv):
@@ -40,10 +39,10 @@ def main(unused_argv):
# DNN classifier.
classifier = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
- hidden_units=[10, 20, 10], n_classes=3)
+ hidden_units=[10, 20, 10],
+ n_classes=3)
- pipeline = Pipeline([('scaler', scaler),
- ('DNNclassifier', classifier)])
+ pipeline = Pipeline([('scaler', scaler), ('DNNclassifier', classifier)])
pipeline.fit(x_train, y_train, DNNclassifier__steps=200)
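The DNNclassifier__steps=200 keyword uses scikit-learn's fit-parameter routing: a keyword prefixed with a step label and a double underscore is forwarded to that step's fit(). A self-contained sketch of the mechanism, with a stock scikit-learn estimator standing in for the DNN:

    import numpy as np
    from sklearn.linear_model import SGDClassifier
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    X = np.random.rand(20, 4)
    y = np.array([0, 1] * 10)
    w = np.ones(20)

    pipe = Pipeline([('scaler', StandardScaler()), ('clf', SGDClassifier())])
    # The 'clf__' prefix routes sample_weight to the fit() of the 'clf' step,
    # just as 'DNNclassifier__steps' above routes steps=200 to the classifier.
    pipe.fit(X, y, clf__sample_weight=w)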
diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py
index 8b416373ba..4b3f1835e2 100644
--- a/tensorflow/examples/learn/mnist.py
+++ b/tensorflow/examples/learn/mnist.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
@@ -25,8 +24,9 @@ from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import layers
-from tensorflow.contrib import learn
+
+layers = tf.contrib.layers
+learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
@@ -46,14 +46,14 @@ def conv_model(feature, target, mode):
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
- h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
- activation_fn=tf.nn.relu)
+ h_conv1 = layers.convolution(
+ feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
- h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
- activation_fn=tf.nn.relu)
+ h_conv2 = layers.convolution(
+ h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
@@ -61,7 +61,8 @@ def conv_model(feature, target, mode):
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
- h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
+ h_pool2_flat, 1024, activation_fn=tf.nn.relu),
+ keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
@@ -70,7 +71,9 @@ def conv_model(feature, target, mode):
# Create a tensor for training op.
train_op = layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
@@ -85,18 +88,22 @@ def main(unused_args):
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
- classifier.fit(mnist.train.images, mnist.train.labels.astype(np.int32),
- batch_size=100, steps=1000)
- score = metrics.accuracy_score(
- mnist.test.labels, list(classifier.predict(mnist.test.images)))
+ classifier.fit(mnist.train.images,
+ mnist.train.labels.astype(np.int32),
+ batch_size=100,
+ steps=1000)
+ score = metrics.accuracy_score(mnist.test.labels,
+ list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
- classifier.fit(mnist.train.images, mnist.train.labels,
- batch_size=100, steps=20000)
- score = metrics.accuracy_score(
- mnist.test.labels, list(classifier.predict(mnist.test.images)))
+ classifier.fit(mnist.train.images,
+ mnist.train.labels,
+ batch_size=100,
+ steps=20000)
+ score = metrics.accuracy_score(mnist.test.labels,
+ list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
diff --git a/tensorflow/examples/learn/multiple_gpu.py b/tensorflow/examples/learn/multiple_gpu.py
index 6647ec3d42..a395d94151 100644
--- a/tensorflow/examples/learn/multiple_gpu.py
+++ b/tensorflow/examples/learn/multiple_gpu.py
@@ -24,8 +24,9 @@ from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import layers
-from tensorflow.contrib import learn
+
+layers = tf.contrib.layers
+learn = tf.contrib.learn
def my_model(features, target):
@@ -50,9 +51,11 @@ def my_model(features, target):
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
- features = layers.stack(features, layers.fully_connected, [10, 20, 10],
- normalizer_fn=normalizer_fn,
- normalizer_params=normalizer_params)
+ features = layers.stack(
+ features,
+ layers.fully_connected, [10, 20, 10],
+ normalizer_fn=normalizer_fn,
+ normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
@@ -61,12 +64,15 @@ def my_model(features, target):
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
- 'prob': tf.nn.softmax(logits)}, loss, train_op)
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def main(unused_argv):
@@ -78,7 +84,9 @@ def main(unused_argv):
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
- p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+ p['class'] for p in classifier.predict(
+ x_test, as_iterable=True)
+ ]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
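multiple_gpu.py pins parts of the graph to devices with tf.device; the hunks above only re-wrap the arguments inside those scopes. A minimal placement sketch in the same 1.x style (the device string assumes a GPU is present; soft placement falls back to CPU otherwise):

    import tensorflow as tf

    # Ops created inside a tf.device scope are placed on that device.
    with tf.device('/gpu:0'):
        a = tf.constant([[1.0, 2.0]])
        b = tf.constant([[3.0], [4.0]])
        c = tf.matmul(a, b)

    # allow_soft_placement lets TensorFlow fall back when the device
    # is unavailable instead of raising an error.
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        print(sess.run(c))  # [[11.]]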
diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py
index fe1a07ccfa..49d89ef660 100755
--- a/tensorflow/examples/learn/resnet.py
+++ b/tensorflow/examples/learn/resnet.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""This example builds deep residual network for mnist data.
Reference Paper: http://arxiv.org/pdf/1512.03385.pdf
@@ -29,8 +28,9 @@ from math import sqrt
import os
import tensorflow as tf
-from tensorflow.contrib import learn
-from tensorflow.contrib.layers import batch_norm, convolution2d
+
+batch_norm = tf.contrib.layers.batch_norm
+convolution2d = tf.contrib.layers.convolution2d
def res_net(x, y, activation=tf.nn.relu):
@@ -52,12 +52,12 @@ def res_net(x, y, activation=tf.nn.relu):
"""
# Configurations for each bottleneck group.
- BottleneckGroup = namedtuple(
- 'BottleneckGroup', ['num_blocks', 'num_filters', 'bottleneck_size'])
- groups = [BottleneckGroup(3, 128, 32),
- BottleneckGroup(3, 256, 64),
- BottleneckGroup(3, 512, 128),
- BottleneckGroup(3, 1024, 256)]
+ BottleneckGroup = namedtuple('BottleneckGroup',
+ ['num_blocks', 'num_filters', 'bottleneck_size'])
+ groups = [
+ BottleneckGroup(3, 128, 32), BottleneckGroup(3, 256, 64),
+ BottleneckGroup(3, 512, 128), BottleneckGroup(3, 1024, 256)
+ ]
input_shape = x.get_shape().as_list()
@@ -68,18 +68,15 @@ def res_net(x, y, activation=tf.nn.relu):
# First convolution expands to 64 channels
with tf.variable_scope('conv_layer1'):
- net = convolution2d(x, 64, 7,
- normalizer_fn=batch_norm,
- activation_fn=activation)
+ net = convolution2d(
+ x, 64, 7, normalizer_fn=batch_norm, activation_fn=activation)
# Max pool
- net = tf.nn.max_pool(
- net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
+ net = tf.nn.max_pool(net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# First chain of resnets
with tf.variable_scope('conv_layer2'):
- net = convolution2d(net, groups[0].num_filters, 1,
- padding='VALID')
+ net = convolution2d(net, groups[0].num_filters, 1, padding='VALID')
# Create the bottleneck groups, each of which contains `num_blocks`
# bottleneck groups.
@@ -89,24 +86,33 @@ def res_net(x, y, activation=tf.nn.relu):
# 1x1 convolution responsible for reducing dimension
with tf.variable_scope(name + '/conv_in'):
- conv = convolution2d(net, group.bottleneck_size, 1,
- padding='VALID',
- activation_fn=activation,
- normalizer_fn=batch_norm)
+ conv = convolution2d(
+ net,
+ group.bottleneck_size,
+ 1,
+ padding='VALID',
+ activation_fn=activation,
+ normalizer_fn=batch_norm)
with tf.variable_scope(name + '/conv_bottleneck'):
- conv = convolution2d(conv, group.bottleneck_size, 3,
- padding='SAME',
- activation_fn=activation,
- normalizer_fn=batch_norm)
+ conv = convolution2d(
+ conv,
+ group.bottleneck_size,
+ 3,
+ padding='SAME',
+ activation_fn=activation,
+ normalizer_fn=batch_norm)
# 1x1 convolution responsible for restoring dimension
with tf.variable_scope(name + '/conv_out'):
input_dim = net.get_shape()[-1].value
- conv = convolution2d(conv, input_dim, 1,
- padding='VALID',
- activation_fn=activation,
- normalizer_fn=batch_norm)
+ conv = convolution2d(
+ conv,
+ input_dim,
+ 1,
+ padding='VALID',
+ activation_fn=activation,
+ normalizer_fn=batch_norm)
# shortcut connections that turn the network into its counterpart
# residual function (identity shortcut)
@@ -116,17 +122,22 @@ def res_net(x, y, activation=tf.nn.relu):
# upscale to the next group size
next_group = groups[group_i + 1]
with tf.variable_scope('block_%d/conv_upscale' % group_i):
- net = convolution2d(net, next_group.num_filters, 1,
- activation_fn=None,
- biases_initializer=None,
- padding='SAME')
+ net = convolution2d(
+ net,
+ next_group.num_filters,
+ 1,
+ activation_fn=None,
+ biases_initializer=None,
+ padding='SAME')
except IndexError:
pass
net_shape = net.get_shape().as_list()
- net = tf.nn.avg_pool(net,
- ksize=[1, net_shape[1], net_shape[2], 1],
- strides=[1, 1, 1, 1], padding='VALID')
+ net = tf.nn.avg_pool(
+ net,
+ ksize=[1, net_shape[1], net_shape[2], 1],
+ strides=[1, 1, 1, 1],
+ padding='VALID')
net_shape = net.get_shape().as_list()
net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])
@@ -143,29 +154,36 @@ def res_net_model(x, y):
accuracy = tf.equal(predicted, tf.cast(y, tf.int64))
predictions = {'prob': prediction, 'class': predicted, 'accuracy': accuracy}
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(),
- optimizer='Adagrad', learning_rate=0.001)
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adagrad',
+ learning_rate=0.001)
return predictions, loss, train_op
+
# Download and load MNIST data.
-mnist = learn.datasets.load_dataset('mnist')
+mnist = tf.contrib.learn.datasets.load_dataset('mnist')
# Create a new resnet classifier.
-classifier = learn.Estimator(model_fn=res_net_model)
+classifier = tf.contrib.learn.Estimator(model_fn=res_net_model)
tf.logging.set_verbosity(tf.logging.INFO) # Show training logs. (avoid silence)
# Train model and save summaries into logdir.
-classifier.fit(
- mnist.train.images, mnist.train.labels, batch_size=100, steps=1000)
+classifier.fit(mnist.train.images,
+ mnist.train.labels,
+ batch_size=100,
+ steps=1000)
# Calculate accuracy.
result = classifier.evaluate(
- x=mnist.test.images, y=mnist.test.labels,
+ x=mnist.test.images,
+ y=mnist.test.labels,
metrics={
- 'accuracy': learn.metric_spec.MetricSpec(
- metric_fn=tf.contrib.metrics.streaming_accuracy,
- prediction_key='accuracy'),
+ 'accuracy':
+ tf.contrib.learn.metric_spec.MetricSpec(
+ metric_fn=tf.contrib.metrics.streaming_accuracy,
+ prediction_key='accuracy'),
})
score = result['accuracy']
print('Accuracy: {0:f}'.format(score))
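The evaluate() call above binds a streaming metric through MetricSpec, whose prediction_key picks one entry out of the dict returned by res_net_model. A minimal sketch of that wiring, assuming the same 1.x contrib API:

    import tensorflow as tf

    # prediction_key='accuracy' selects predictions['accuracy'] from the
    # model_fn's output dict and passes it as the predictions argument of
    # metric_fn; labels come from the y= argument of evaluate().
    spec = tf.contrib.learn.metric_spec.MetricSpec(
        metric_fn=tf.contrib.metrics.streaming_accuracy,
        prediction_key='accuracy')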
diff --git a/tensorflow/examples/learn/text_classification.py b/tensorflow/examples/learn/text_classification.py
index 4fcae99d60..a3a5f9e3e9 100644
--- a/tensorflow/examples/learn/text_classification.py
+++ b/tensorflow/examples/learn/text_classification.py
@@ -25,7 +25,7 @@ import pandas
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import learn
+learn = tf.contrib.learn
FLAGS = None
@@ -42,11 +42,14 @@ def bag_of_words_model(features, target):
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(),
- optimizer='Adam', learning_rate=0.01)
- return (
- {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
- loss, train_op)
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adam',
+ learning_rate=0.01)
+ return ({
+ 'class': tf.argmax(logits, 1),
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def rnn_model(features, target):
@@ -78,12 +81,15 @@ def rnn_model(features, target):
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(),
- optimizer='Adam', learning_rate=0.01)
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adam',
+ learning_rate=0.01)
- return (
- {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
- loss, train_op)
+ return ({
+ 'class': tf.argmax(logits, 1),
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def main(unused_argv):
@@ -113,7 +119,9 @@ def main(unused_argv):
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
- p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+ p['class'] for p in classifier.predict(
+ x_test, as_iterable=True)
+ ]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
@@ -124,13 +132,11 @@ if __name__ == '__main__':
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
- action='store_true'
- )
+ action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
- action='store_true'
- )
+ action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
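The argparse hunks above only pull the closing parenthesis onto the action line; flag semantics are unchanged. For reference, a self-contained sketch of the store_true pattern these examples share:

    import argparse

    parser = argparse.ArgumentParser()
    # With action='store_true' the flag takes no value: absent -> False
    # (the default), present -> True.
    parser.add_argument(
        '--test_with_fake_data',
        default=False,
        help='Test the example code with fake data.',
        action='store_true')
    args = parser.parse_args(['--test_with_fake_data'])
    print(args.test_with_fake_data)  # True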
diff --git a/tensorflow/examples/learn/text_classification_character_cnn.py b/tensorflow/examples/learn/text_classification_character_cnn.py
index ffb5a51ad4..143af4f664 100644
--- a/tensorflow/examples/learn/text_classification_character_cnn.py
+++ b/tensorflow/examples/learn/text_classification_character_cnn.py
@@ -11,10 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-"""
-This is an example of using convolutional networks over characters
-for DBpedia dataset to predict class from description of an entity.
+"""This is an example of using convolutional networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
@@ -36,7 +33,7 @@ import pandas
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import learn
+learn = tf.contrib.learn
FLAGS = None
@@ -51,8 +48,8 @@ POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
- byte_list = tf.reshape(tf.one_hot(features, 256, 1, 0),
- [-1, MAX_DOCUMENT_LENGTH, 256, 1])
+ byte_list = tf.reshape(
+ tf.one_hot(features, 256, 1, 0), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
@@ -60,8 +57,11 @@ def char_cnn_model(features, target):
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
- pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
- strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
+ pool1 = tf.nn.max_pool(
+ conv1,
+ ksize=[1, POOLING_WINDOW, 1, 1],
+ strides=[1, POOLING_STRIDE, 1, 1],
+ padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
@@ -76,12 +76,15 @@ def char_cnn_model(features, target):
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(),
- optimizer='Adam', learning_rate=0.01)
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adam',
+ learning_rate=0.01)
- return (
- {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
- loss, train_op)
+ return ({
+ 'class': tf.argmax(logits, 1),
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def main(unused_argv):
@@ -104,7 +107,9 @@ def main(unused_argv):
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
- p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+ p['class'] for p in classifier.predict(
+ x_test, as_iterable=True)
+ ]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
@@ -115,7 +120,6 @@ if __name__ == '__main__':
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
- action='store_true'
- )
+ action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
diff --git a/tensorflow/examples/learn/text_classification_character_rnn.py b/tensorflow/examples/learn/text_classification_character_rnn.py
index 6b2302ed31..1cb2cd2f88 100644
--- a/tensorflow/examples/learn/text_classification_character_rnn.py
+++ b/tensorflow/examples/learn/text_classification_character_rnn.py
@@ -11,10 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-"""
-This is an example of using recurrent neural networks over characters
-for DBpedia dataset to predict class from description of an entity.
+"""This is an example of using recurrent neural networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
@@ -36,7 +33,7 @@ import pandas
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import learn
+learn = tf.contrib.learn
FLAGS = None
@@ -57,12 +54,15 @@ def char_rnn_model(features, target):
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(),
- optimizer='Adam', learning_rate=0.01)
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adam',
+ learning_rate=0.01)
- return (
- {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
- loss, train_op)
+ return ({
+ 'class': tf.argmax(logits, 1),
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def main(unused_argv):
@@ -85,7 +85,9 @@ def main(unused_argv):
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
- p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+ p['class'] for p in classifier.predict(
+ x_test, as_iterable=True)
+ ]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
@@ -96,7 +98,6 @@ if __name__ == '__main__':
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
- action='store_true'
- )
+ action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
diff --git a/tensorflow/examples/learn/text_classification_cnn.py b/tensorflow/examples/learn/text_classification_cnn.py
index cb17ae46ae..41fbdba1a7 100644
--- a/tensorflow/examples/learn/text_classification_cnn.py
+++ b/tensorflow/examples/learn/text_classification_cnn.py
@@ -25,7 +25,7 @@ import pandas
from sklearn import metrics
import tensorflow as tf
-from tensorflow.contrib import learn
+learn = tf.contrib.learn
FLAGS = None
@@ -52,20 +52,22 @@ def cnn_model(features, target):
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
- conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,
- FILTER_SHAPE1, padding='VALID')
+ conv1 = tf.contrib.layers.convolution2d(
+ word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
- conv1, ksize=[1, POOLING_WINDOW, 1, 1],
- strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
+ conv1,
+ ksize=[1, POOLING_WINDOW, 1, 1],
+ strides=[1, POOLING_STRIDE, 1, 1],
+ padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
- conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
- FILTER_SHAPE2, padding='VALID')
+ conv2 = tf.contrib.layers.convolution2d(
+ pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
@@ -74,12 +76,15 @@ def cnn_model(features, target):
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
- loss, tf.contrib.framework.get_global_step(),
- optimizer='Adam', learning_rate=0.01)
+ loss,
+ tf.contrib.framework.get_global_step(),
+ optimizer='Adam',
+ learning_rate=0.01)
- return (
- {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
- loss, train_op)
+ return ({
+ 'class': tf.argmax(logits, 1),
+ 'prob': tf.nn.softmax(logits)
+ }, loss, train_op)
def main(unused_argv):
@@ -105,7 +110,9 @@ def main(unused_argv):
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
- p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+ p['class'] for p in classifier.predict(
+ x_test, as_iterable=True)
+ ]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
@@ -116,7 +123,6 @@ if __name__ == '__main__':
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
- action='store_true'
- )
+ action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)