Diffstat (limited to 'tensorflow/examples')
 tensorflow/examples/learn/boston.py                            | 9 +++++----
 tensorflow/examples/learn/iris_custom_model.py                 | 2 +-
 tensorflow/examples/learn/mnist.py                             | 2 +-
 tensorflow/examples/learn/multiple_gpu.py                      | 2 +-
 tensorflow/examples/learn/resnet.py                            | 2 +-
 tensorflow/examples/learn/text_classification_character_cnn.py | 4 ++--
 tensorflow/examples/tutorials/word2vec/word2vec_basic.py       | 2 ++
 tensorflow/examples/udacity/1_notmnist.ipynb                   | 2 +-
 8 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/tensorflow/examples/learn/boston.py b/tensorflow/examples/learn/boston.py
index 5d5ddff564..2986ff9106 100644
--- a/tensorflow/examples/learn/boston.py
+++ b/tensorflow/examples/learn/boston.py
@@ -43,11 +43,12 @@ def main(unused_argv):
   # Fit
   regressor.fit(x_train, y_train, steps=5000, batch_size=1)
-
+
+  # Transform
+  x_transformed = scaler.transform(x_test)
+
   # Predict and score
-  y_predicted = list(
-      regressor.predict(
-          scaler.transform(x_test), as_iterable=True))
+  y_predicted = list(regressor.predict(x_transformed, as_iterable=True))
   score = metrics.mean_squared_error(y_predicted, y_test)
   print('MSE: {0:f}'.format(score))
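
For readers following along outside the diff: the change precomputes the scaled test matrix instead of inlining the transform in the predict() call. A minimal standalone sketch of the same fit/transform/predict flow, assuming the TF 1.x tf.contrib.learn API and an sklearn old enough to still ship load_boston (names mirror the example; hidden_units is illustrative):

    from sklearn import datasets, metrics, preprocessing
    from sklearn.model_selection import train_test_split
    import tensorflow as tf

    boston = datasets.load_boston()
    x_train, x_test, y_train, y_test = train_test_split(
        boston.data, boston.target, test_size=0.2)

    # Learn scaling statistics from the training split only.
    scaler = preprocessing.StandardScaler()
    x_train = scaler.fit_transform(x_train)

    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x_train)
    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns, hidden_units=[10, 10])
    regressor.fit(x_train, y_train, steps=5000, batch_size=1)

    # Reuse the training-set statistics on the test split, then predict.
    x_transformed = scaler.transform(x_test)
    y_predicted = list(regressor.predict(x_transformed, as_iterable=True))
    print('MSE: {0:f}'.format(metrics.mean_squared_error(y_predicted, y_test)))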
diff --git a/tensorflow/examples/learn/iris_custom_model.py b/tensorflow/examples/learn/iris_custom_model.py
index 31fb88954b..fbc50716c9 100644
--- a/tensorflow/examples/learn/iris_custom_model.py
+++ b/tensorflow/examples/learn/iris_custom_model.py
@@ -43,7 +43,7 @@ def my_model(features, target):
   # Compute logits (1 per class) and compute loss.
   logits = layers.fully_connected(features, 3, activation_fn=None)
-  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+  loss = tf.losses.softmax_cross_entropy(target, logits)
 
   # Create a tensor for training op.
   train_op = tf.contrib.layers.optimize_loss(
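
The recurring substitution in this commit is a module move plus an argument-order swap: the deprecated tf.contrib.losses.softmax_cross_entropy took (logits, onehot_labels), while tf.losses.softmax_cross_entropy takes (onehot_labels, logits). A minimal TF 1.x sketch (values are illustrative):

    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])         # unnormalized scores, one example
    onehot_labels = tf.constant([[1.0, 0.0, 0.0]])  # one-hot target

    # Old: tf.contrib.losses.softmax_cross_entropy(logits, onehot_labels)
    # New: labels come first, logits second.
    loss = tf.losses.softmax_cross_entropy(onehot_labels, logits)

    with tf.Session() as sess:
        print(sess.run(loss))  # mean cross-entropy, about 0.417 here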
diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py
index 4b3f1835e2..6e5fe7891b 100644
--- a/tensorflow/examples/learn/mnist.py
+++ b/tensorflow/examples/learn/mnist.py
@@ -67,7 +67,7 @@ def conv_model(feature, target, mode):
   # Compute logits (1 per class) and compute loss.
   logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
-  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+  loss = tf.losses.softmax_cross_entropy(target, logits)
 
   # Create a tensor for training op.
   train_op = layers.optimize_loss(
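
Worth noting for this model: tf.losses.softmax_cross_entropy expects one-hot labels, which is why conv_model one-hots target before this line. A sketch of the two equivalent routes, assuming default (uniform) loss weights:

    import tensorflow as tf

    labels = tf.constant([3, 1])        # sparse class ids for a batch of 2
    logits = tf.random_normal([2, 10])  # stand-in for the final layer's output

    # Route taken by the example: expand to one-hot, then the dense loss.
    onehot = tf.one_hot(labels, 10, 1.0, 0.0)
    loss = tf.losses.softmax_cross_entropy(onehot, logits)

    # Equivalent without the manual one-hot step.
    loss_sparse = tf.losses.sparse_softmax_cross_entropy(labels, logits)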
diff --git a/tensorflow/examples/learn/multiple_gpu.py b/tensorflow/examples/learn/multiple_gpu.py
index a395d94151..df58906b39 100644
--- a/tensorflow/examples/learn/multiple_gpu.py
+++ b/tensorflow/examples/learn/multiple_gpu.py
@@ -60,7 +60,7 @@ def my_model(features, target):
   with tf.device('/gpu:2'):
     # Compute logits (1 per class) and compute loss.
     logits = layers.fully_connected(features, 3, activation_fn=None)
-    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+    loss = tf.losses.softmax_cross_entropy(target, logits)
 
     # Create a tensor for training op.
     train_op = tf.contrib.layers.optimize_loss(
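
Since this hunk sits inside a tf.device('/gpu:2') block: that pins op placement to the third GPU, which only resolves on a machine with at least three visible GPUs. A hedged sketch of the placement pattern with a soft-placement fallback (the config flags are standard TF 1.x options, not part of this example):

    import tensorflow as tf

    with tf.device('/gpu:2'):  # third GPU, if present
        x = tf.random_normal([4, 3])
        logits = tf.contrib.layers.fully_connected(x, 3, activation_fn=None)

    # Fall back to CPU when /gpu:2 does not exist, and log where ops land.
    config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(logits))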
diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py
index 49d89ef660..f822903208 100755
--- a/tensorflow/examples/learn/resnet.py
+++ b/tensorflow/examples/learn/resnet.py
@@ -144,7 +144,7 @@ def res_net(x, y, activation=tf.nn.relu):
   target = tf.one_hot(y, depth=10, dtype=tf.float32)
   logits = tf.contrib.layers.fully_connected(net, 10, activation_fn=None)
-  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+  loss = tf.losses.softmax_cross_entropy(target, logits)
   return tf.softmax(logits), loss
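
One caveat in the surrounding context line: tf.softmax is not a public TensorFlow symbol; the public op is tf.nn.softmax. A minimal sketch of what the return presumably intends, using the public name:

    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])
    target = tf.constant([[1.0, 0.0, 0.0]])

    loss = tf.losses.softmax_cross_entropy(target, logits)
    predictions = tf.nn.softmax(logits)  # public op; tf.softmax is not exposed

    with tf.Session() as sess:
        probs, loss_value = sess.run([predictions, loss])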
diff --git a/tensorflow/examples/learn/text_classification_character_cnn.py b/tensorflow/examples/learn/text_classification_character_cnn.py
index 143af4f664..0c96976146 100644
--- a/tensorflow/examples/learn/text_classification_character_cnn.py
+++ b/tensorflow/examples/learn/text_classification_character_cnn.py
@@ -49,7 +49,7 @@ def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
- tf.one_hot(features, 256, 1, 0), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
+ tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
@@ -73,7 +73,7 @@ def char_cnn_model(features, target):
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
- loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+ loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
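
The one-hot change in the first hunk is about dtype, not values: with explicit on/off values 1 and 0, tf.one_hot infers int32, while the defaults (on_value=1.0, off_value=0.0) yield float32, which is what the downstream convolution expects. A small sketch:

    import tensorflow as tf

    ids = tf.constant([7, 42])

    a = tf.one_hot(ids, 256, 1, 0)  # int on/off values -> int32 output
    b = tf.one_hot(ids, 256)        # default on/off values -> float32 output

    print(a.dtype)  # <dtype: 'int32'>
    print(b.dtype)  # <dtype: 'float32'>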
diff --git a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
index bc502edd8b..8dcd3bf37a 100644
--- a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
+++ b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
@@ -112,6 +112,8 @@ def generate_batch(batch_size, num_skips, skip_window):
       labels[i * num_skips + j, 0] = buffer[target]
     buffer.append(data[data_index])
     data_index = (data_index + 1) % len(data)
+  # Backtrack a little bit to avoid skipping words in the end of a batch
+  data_index = (data_index + len(data) - span) % len(data)
   return batch, labels
 
 batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
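
The added backtrack rewinds the corpus cursor by span = 2 * skip_window + 1 positions: after a batch is filled, the sliding-window deque has already consumed span words past the last center word, so without the rewind the next call would skip them. A toy check of the modular arithmetic (numbers are hypothetical):

    data_len = 10
    span = 3         # skip_window = 1
    data_index = 1   # cursor after a batch, already wrapped past the end

    # Adding len(data) before subtracting keeps the intermediate value
    # non-negative and makes the intent explicit: step back `span`
    # positions on the circular corpus.
    rewound = (data_index + data_len - span) % data_len
    print(rewound)   # 8 -> the next batch re-reads the words still buffered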
diff --git a/tensorflow/examples/udacity/1_notmnist.ipynb b/tensorflow/examples/udacity/1_notmnist.ipynb
index 4b0a20b1dd..521cbf3000 100644
--- a/tensorflow/examples/udacity/1_notmnist.ipynb
+++ b/tensorflow/examples/udacity/1_notmnist.ipynb
@@ -219,7 +219,7 @@
" print('Extracting data for %s. This may take a while. Please wait.' % root)\n",
" tar = tarfile.open(filename)\n",
" sys.stdout.flush()\n",
- " tar.extractall()\n",
+ " tar.extractall(data_root)\n",
" tar.close()\n",
" data_folders = [\n",
" os.path.join(root, d) for d in sorted(os.listdir(root))\n",