author     Andrew Harp <andrewharp@google.com>              2017-03-01 17:59:22 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>  2017-03-01 18:08:24 -0800
commit     3e975ea978bac4d861bb09328b06f3c316212611 (patch)
tree       79bac044c9723df8443495eb962c2dd98a2ed421 /tensorflow/examples/learn
parent     8043a27ed77f59bb68409070f2bfa01df0e04b89 (diff)
Merge changes from github.
Change: 148954491
Diffstat (limited to 'tensorflow/examples/learn')
-rw-r--r--   tensorflow/examples/learn/mnist.py                 4
-rw-r--r--   tensorflow/examples/learn/text_classification.py   9
2 files changed, 9 insertions, 4 deletions
diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py
index 6e5fe7891b..15cf4b91dd 100644
--- a/tensorflow/examples/learn/mnist.py
+++ b/tensorflow/examples/learn/mnist.py
@@ -46,13 +46,13 @@ def conv_model(feature, target, mode):
   # First conv layer will compute 32 features for each 5x5 patch
   with tf.variable_scope('conv_layer1'):
-    h_conv1 = layers.convolution(
+    h_conv1 = layers.convolution2d(
         feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
     h_pool1 = max_pool_2x2(h_conv1)
 
   # Second conv layer will compute 64 features for each 5x5 patch.
   with tf.variable_scope('conv_layer2'):
-    h_conv2 = layers.convolution(
+    h_conv2 = layers.convolution2d(
         h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
     h_pool2 = max_pool_2x2(h_conv2)
 
   # reshape tensor into a batch of vectors
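For context, layers.convolution2d is the explicitly 2-D convolution helper in tf.contrib.layers. A minimal sketch of the updated first conv/pool block, assuming TensorFlow 1.x with tf.contrib.layers imported as layers and a max_pool_2x2 helper mirroring the one defined in mnist.py:

import tensorflow as tf
from tensorflow.contrib import layers


def max_pool_2x2(tensor_in):
  # 2x2 max pooling with stride 2, as in the example's helper.
  return tf.nn.max_pool(
      tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# A batch of 28x28 single-channel images (placeholder stands in for the
# `feature` tensor that conv_model receives).
feature = tf.placeholder(tf.float32, [None, 28, 28, 1])

with tf.variable_scope('conv_layer1'):
  # 32 output channels, 5x5 kernels, ReLU activation.
  h_conv1 = layers.convolution2d(
      feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
  h_pool1 = max_pool_2x2(h_conv1)  # -> shape [batch, 14, 14, 32]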
diff --git a/tensorflow/examples/learn/text_classification.py b/tensorflow/examples/learn/text_classification.py
index a3a5f9e3e9..c3d00a11b9 100644
--- a/tensorflow/examples/learn/text_classification.py
+++ b/tensorflow/examples/learn/text_classification.py
@@ -104,8 +104,13 @@ def main(unused_argv):
 
   # Process vocabulary
   vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
-  x_train = np.array(list(vocab_processor.fit_transform(x_train)))
-  x_test = np.array(list(vocab_processor.transform(x_test)))
+
+  x_transform_train = vocab_processor.fit_transform(x_train)
+  x_transform_test = vocab_processor.transform(x_test)
+
+  x_train = np.array(list(x_transform_train))
+  x_test = np.array(list(x_transform_test))
+
   n_words = len(vocab_processor.vocabulary_)
   print('Total words: %d' % n_words)
 
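Both versions produce the same arrays; the new code only names the intermediate generators returned by VocabularyProcessor before materializing them. A minimal sketch of the pattern, assuming TensorFlow 1.x with tf.contrib.learn available and hypothetical toy documents:

import numpy as np
from tensorflow.contrib import learn

MAX_DOCUMENT_LENGTH = 10  # hypothetical per-document token cap

x_train = ['first training document', 'second training document']
x_test = ['an unseen test document']

# Fit the vocabulary on the training documents only.
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)  # generator of word-id rows
x_transform_test = vocab_processor.transform(x_test)        # reuses the fitted vocabulary

# Materialize into dense [n_docs, MAX_DOCUMENT_LENGTH] integer arrays.
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))

n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)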