author    Martin Wicke <wicke@google.com>    2017-03-23 12:31:16 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2017-03-23 13:44:29 -0700
commit bc456e361d49d1d89a74b80060c70efb51fd7d87 (patch)
tree   825e04287f1e2d2ac098ca3f0fdd4e361aefd68c /tensorflow/examples/tutorials
parent 8ca071456537e6c96ae8896c2a20b1f08b0e59d3 (diff)
Merge changes from github.
Change: 151046259
Diffstat (limited to 'tensorflow/examples/tutorials')
-rw-r--r--  tensorflow/examples/tutorials/deepdream/deepdream.ipynb   2
-rw-r--r--  tensorflow/examples/tutorials/word2vec/word2vec_basic.py  6
2 files changed, 4 insertions, 4 deletions
diff --git a/tensorflow/examples/tutorials/deepdream/deepdream.ipynb b/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
index cbcc54ce3c..016b21cd12 100644
--- a/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
+++ b/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
@@ -278,7 +278,7 @@
" tensor = n.attr['value'].tensor\n",
" size = len(tensor.tensor_content)\n",
" if size > max_const_size:\n",
- " tensor.tensor_content = bytes(\"<stripped %d bytes>\"%size, 'utf-8')\n",
+ " tensor.tensor_content = bytes(\"<stripped %d bytes>\"%size)\n",
" return strip_def\n",
" \n",
"def rename_nodes(graph_def, rename_func):\n",
diff --git a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
index 25800c109e..f54a7c37a1 100644
--- a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
+++ b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
@@ -62,7 +62,7 @@ print('Data size', len(words))
vocabulary_size = 50000
-def build_dataset(words):
+def build_dataset(words, vocabulary_size):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
@@ -81,7 +81,7 @@ def build_dataset(words):
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
-data, count, dictionary, reverse_dictionary = build_dataset(words)
+data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
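The two word2vec hunks above thread vocabulary_size through as an explicit parameter instead of relying on a module-level global, which makes the function reusable and testable in isolation. A minimal runnable sketch of the patched function on a toy corpus follows; the lines between the two hunks are not shown in this diff, so the middle of the body is filled in from the tutorial source:

import collections

def build_dataset(words, vocabulary_size):
  """Map a list of tokens to integer ids, keeping the most common words."""
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    dictionary[word] = len(dictionary)
  data = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count += 1
    data.append(index)
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
  return data, count, dictionary, reverse_dictionary

# Toy corpus: only the 3 most frequent words get their own ids.
words = ['the', 'quick', 'brown', 'fox', 'jumps', 'the', 'the', 'fox']
data, count, dictionary, reverse_dictionary = build_dataset(words, 4)
print(count)  # [['UNK', 2], ('the', 3), ('fox', 2), ('quick', 1)]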
@@ -181,7 +181,7 @@ with graph.as_default():
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
- init = tf.initialize_all_variables()
+ init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
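The last hunk swaps the initializer op for its newer name: tf.initialize_all_variables() was deprecated in TensorFlow 0.12 in favor of the equivalent tf.global_variables_initializer(). A minimal sketch of how the returned op is used in the TF 1.x graph-mode API (the variable here is illustrative, not taken from the tutorial):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  embeddings = tf.Variable(tf.random_uniform([10, 4], -1.0, 1.0))
  init = tf.global_variables_initializer()  # replaces tf.initialize_all_variables()

with tf.Session(graph=graph) as session:
  init.run()  # must run before any variable is read
  print(embeddings.eval())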