diff options
author | Vijay Vasudevan <vrv@google.com> | 2017-02-17 17:05:49 -0800 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2017-02-17 17:23:48 -0800 |
commit | 93a975e114ee1c35f01ed3bdd47170e6f7129014 (patch) | |
tree | e34255aff698fe6a4a586e7940337fd278947f58 /tensorflow/examples/learn | |
parent | eb9624017a0040e805fda622a5f9ec6681e24246 (diff) |
Merge changes from GitHub.
Change: 147897309
Diffstat (limited to 'tensorflow/examples/learn')
-rw-r--r-- | tensorflow/examples/learn/boston.py | 9 | ||||
-rw-r--r-- | tensorflow/examples/learn/iris_custom_model.py | 2 | ||||
-rw-r--r-- | tensorflow/examples/learn/mnist.py | 2 | ||||
-rw-r--r-- | tensorflow/examples/learn/multiple_gpu.py | 2 | ||||
-rwxr-xr-x | tensorflow/examples/learn/resnet.py | 2 | ||||
-rw-r--r-- | tensorflow/examples/learn/text_classification_character_cnn.py | 4 |
6 files changed, 11 insertions, 10 deletions
diff --git a/tensorflow/examples/learn/boston.py b/tensorflow/examples/learn/boston.py index 5d5ddff564..2986ff9106 100644 --- a/tensorflow/examples/learn/boston.py +++ b/tensorflow/examples/learn/boston.py @@ -43,11 +43,12 @@ def main(unused_argv): # Fit regressor.fit(x_train, y_train, steps=5000, batch_size=1) - + + # Transform + x_transformed = scaler.transform(x_test) + # Predict and score - y_predicted = list( - regressor.predict( - scaler.transform(x_test), as_iterable=True)) + y_predicted = list(regressor.predict(x_transformed, as_iterable=True)) score = metrics.mean_squared_error(y_predicted, y_test) print('MSE: {0:f}'.format(score)) diff --git a/tensorflow/examples/learn/iris_custom_model.py b/tensorflow/examples/learn/iris_custom_model.py index 31fb88954b..fbc50716c9 100644 --- a/tensorflow/examples/learn/iris_custom_model.py +++ b/tensorflow/examples/learn/iris_custom_model.py @@ -43,7 +43,7 @@ def my_model(features, target): # Compute logits (1 per class) and compute loss. logits = layers.fully_connected(features, 3, activation_fn=None) - loss = tf.contrib.losses.softmax_cross_entropy(logits, target) + loss = tf.losses.softmax_cross_entropy(target, logits) # Create a tensor for training op. train_op = tf.contrib.layers.optimize_loss( diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py index 4b3f1835e2..6e5fe7891b 100644 --- a/tensorflow/examples/learn/mnist.py +++ b/tensorflow/examples/learn/mnist.py @@ -67,7 +67,7 @@ def conv_model(feature, target, mode): # Compute logits (1 per class) and compute loss. logits = layers.fully_connected(h_fc1, 10, activation_fn=None) - loss = tf.contrib.losses.softmax_cross_entropy(logits, target) + loss = tf.losses.softmax_cross_entropy(target, logits) # Create a tensor for training op. 
train_op = layers.optimize_loss( diff --git a/tensorflow/examples/learn/multiple_gpu.py b/tensorflow/examples/learn/multiple_gpu.py index a395d94151..df58906b39 100644 --- a/tensorflow/examples/learn/multiple_gpu.py +++ b/tensorflow/examples/learn/multiple_gpu.py @@ -60,7 +60,7 @@ def my_model(features, target): with tf.device('/gpu:2'): # Compute logits (1 per class) and compute loss. logits = layers.fully_connected(features, 3, activation_fn=None) - loss = tf.contrib.losses.softmax_cross_entropy(logits, target) + loss = tf.losses.softmax_cross_entropy(target, logits) # Create a tensor for training op. train_op = tf.contrib.layers.optimize_loss( diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py index 49d89ef660..f822903208 100755 --- a/tensorflow/examples/learn/resnet.py +++ b/tensorflow/examples/learn/resnet.py @@ -144,7 +144,7 @@ def res_net(x, y, activation=tf.nn.relu): target = tf.one_hot(y, depth=10, dtype=tf.float32) logits = tf.contrib.layers.fully_connected(net, 10, activation_fn=None) - loss = tf.contrib.losses.softmax_cross_entropy(logits, target) + loss = tf.losses.softmax_cross_entropy(target, logits) return tf.softmax(logits), loss diff --git a/tensorflow/examples/learn/text_classification_character_cnn.py b/tensorflow/examples/learn/text_classification_character_cnn.py index 143af4f664..0c96976146 100644 --- a/tensorflow/examples/learn/text_classification_character_cnn.py +++ b/tensorflow/examples/learn/text_classification_character_cnn.py @@ -49,7 +49,7 @@ def char_cnn_model(features, target): """Character level convolutional neural network model to predict classes.""" target = tf.one_hot(target, 15, 1, 0) byte_list = tf.reshape( - tf.one_hot(features, 256, 1, 0), [-1, MAX_DOCUMENT_LENGTH, 256, 1]) + tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1]) with tf.variable_scope('CNN_Layer1'): # Apply Convolution filtering on input sequence. 
conv1 = tf.contrib.layers.convolution2d( @@ -73,7 +73,7 @@ def char_cnn_model(features, target): # Apply regular WX + B and classification. logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None) - loss = tf.contrib.losses.softmax_cross_entropy(logits, target) + loss = tf.losses.softmax_cross_entropy(target, logits) train_op = tf.contrib.layers.optimize_loss( loss, |