From 2d0531d72c7dcbb0e149cafdd3a16ee8c3ff357a Mon Sep 17 00:00:00 2001
From: Jacques Pienaar
Date: Wed, 21 Mar 2018 12:07:51 -0700
Subject: Merge changes from github.

PiperOrigin-RevId: 189945839
---
 tensorflow/examples/android/AndroidManifest.xml   |  4 ++
 .../src/org/tensorflow/demo/CameraActivity.java   |  7 ++-
 .../src/org/tensorflow/demo/StylizeActivity.java  | 60 ++++++++++++++++++++++
 tensorflow/examples/ios/README.md                 |  6 ++-
 tensorflow/examples/learn/mnist.py                |  6 ++-
 tensorflow/examples/learn/resnet.py               | 12 +++--
 6 files changed, 85 insertions(+), 10 deletions(-)

(limited to 'tensorflow/examples')

diff --git a/tensorflow/examples/android/AndroidManifest.xml b/tensorflow/examples/android/AndroidManifest.xml
index bb75431a1f..5c47ce6b67 100644
--- a/tensorflow/examples/android/AndroidManifest.xml
+++ b/tensorflow/examples/android/AndroidManifest.xml
@@ -40,6 +40,7 @@
+
@@ -49,6 +50,7 @@
+
@@ -58,6 +60,7 @@
+
@@ -67,6 +70,7 @@
+
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
index 8bd4abb154..429138abe5 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
@@ -351,6 +351,10 @@ public abstract class CameraActivity extends Activity
 
   protected void setFragment() {
     String cameraId = chooseCamera();
+    if (cameraId == null) {
+      Toast.makeText(this, "No Camera Detected", Toast.LENGTH_SHORT).show();
+      finish();
+    }
 
     Fragment fragment;
     if (useCamera2API) {
@@ -416,7 +420,8 @@ public abstract class CameraActivity extends Activity
 
   @Override
   public boolean onKeyDown(final int keyCode, final KeyEvent event) {
-    if (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN || keyCode == KeyEvent.KEYCODE_VOLUME_UP) {
+    if (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN || keyCode == KeyEvent.KEYCODE_VOLUME_UP
+        || keyCode == KeyEvent.KEYCODE_BUTTON_L1 || keyCode == KeyEvent.KEYCODE_DPAD_CENTER) {
       debug = !debug;
       requestRender();
       onSetDebug(debug);
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java
index 6a66ec3927..33ec65e9f7 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java
@@ -16,8 +16,10 @@
 
 package org.tensorflow.demo;
 
+import android.app.UiModeManager;
 import android.content.Context;
 import android.content.res.AssetManager;
+import android.content.res.Configuration;
 import android.graphics.Bitmap;
 import android.graphics.Bitmap.Config;
 import android.graphics.BitmapFactory;
@@ -31,9 +33,11 @@ import android.graphics.Typeface;
 import android.media.ImageReader.OnImageAvailableListener;
 import android.os.Bundle;
 import android.os.SystemClock;
+import android.util.DisplayMetrics;
 import android.util.Size;
 import android.util.TypedValue;
 import android.view.Display;
+import android.view.KeyEvent;
 import android.view.MotionEvent;
 import android.view.View;
 import android.view.View.OnClickListener;
@@ -43,6 +47,7 @@ import android.widget.BaseAdapter;
 import android.widget.Button;
 import android.widget.GridView;
 import android.widget.ImageView;
+import android.widget.RelativeLayout;
 import android.widget.Toast;
 import java.io.IOException;
 import java.io.InputStream;
@@ -381,6 +386,27 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
 
     grid = (GridView) findViewById(R.id.grid_layout);
     grid.setAdapter(adapter);
     grid.setOnTouchListener(gridTouchAdapter);
+
+    // Change UI on Android TV
+    UiModeManager uiModeManager = (UiModeManager) getSystemService(UI_MODE_SERVICE);
+    if (uiModeManager.getCurrentModeType() == Configuration.UI_MODE_TYPE_TELEVISION) {
+      DisplayMetrics displayMetrics = new DisplayMetrics();
+      getWindowManager().getDefaultDisplay().getMetrics(displayMetrics);
+      int styleSelectorHeight = displayMetrics.heightPixels;
+      int styleSelectorWidth = displayMetrics.widthPixels - styleSelectorHeight;
+      RelativeLayout.LayoutParams layoutParams = new RelativeLayout.LayoutParams(styleSelectorWidth, ViewGroup.LayoutParams.MATCH_PARENT);
+
+      // Calculate the number of styles per row so that all styles fit without scrolling.
+      int numOfStylePerRow = 3;
+      while (styleSelectorWidth / numOfStylePerRow * Math.ceil((float) (adapter.getCount() - 2) / numOfStylePerRow) > styleSelectorHeight) {
+        numOfStylePerRow++;
+      }
+      grid.setNumColumns(numOfStylePerRow);
+      layoutParams.addRule(RelativeLayout.ALIGN_PARENT_RIGHT);
+      grid.setLayoutParams(layoutParams);
+      adapter.buttons.clear();
+    }
+
     setStyle(adapter.items[0], 1.0f);
   }
 
@@ -602,4 +628,38 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
 
     borderedText.drawLines(canvas, 10, canvas.getHeight() - 10, lines);
   }
+
+  @Override
+  public boolean onKeyDown(int keyCode, KeyEvent event) {
+    int moveOffset = 0;
+    switch (keyCode) {
+      case KeyEvent.KEYCODE_DPAD_LEFT:
+        moveOffset = -1;
+        break;
+      case KeyEvent.KEYCODE_DPAD_RIGHT:
+        moveOffset = 1;
+        break;
+      case KeyEvent.KEYCODE_DPAD_UP:
+        moveOffset = -1 * grid.getNumColumns();
+        break;
+      case KeyEvent.KEYCODE_DPAD_DOWN:
+        moveOffset = grid.getNumColumns();
+        break;
+      default:
+        return super.onKeyDown(keyCode, event);
+    }
+
+    // Find the currently selected style: the one with the highest weight.
+    int currentSelect = 0;
+    float highestValue = 0;
+    for (int i = 0; i < adapter.getCount(); i++) {
+      if (adapter.items[i].value > highestValue) {
+        currentSelect = i;
+        highestValue = adapter.items[i].value;
+      }
+    }
+    setStyle(adapter.items[(currentSelect + moveOffset + adapter.getCount()) % adapter.getCount()], 1);
+
+    return true;
+  }
 }
diff --git a/tensorflow/examples/ios/README.md b/tensorflow/examples/ios/README.md
index 5bdaeb43ce..5d7bd36837 100644
--- a/tensorflow/examples/ios/README.md
+++ b/tensorflow/examples/ios/README.md
@@ -119,11 +119,13 @@ rundown:
   `tensorflow/contrib/makefile/gen/lib` to the Library Search Paths setting.
 
 - You'll also need to add `libprotobuf.a` and `libprotobuf-lite.a` from
-  `tensorflow/contrib/makefile/gen/protobuf_ios/lib` to your _Build Stages_ and
-  _Library Search Paths_.
+  `tensorflow/contrib/makefile/gen/protobuf_ios/lib`
+  and `nsync.a` from `tensorflow/contrib/makefile/downloads/nsync/builds/lipo.ios.c++11`
+  to your _Build Stages_ and _Library Search Paths_.
 
 - The _Header Search_ paths needs to contain:
   - the root folder of tensorflow,
+  - `tensorflow/contrib/makefile/downloads/nsync/public`
   - `tensorflow/contrib/makefile/downloads/protobuf/src`
   - `tensorflow/contrib/makefile/downloads`,
   - `tensorflow/contrib/makefile/downloads/eigen`, and
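A note on the two learn/ diffs that follow: both replace an explicit check of the Estimator mode with the boolean `training` argument that `tf.layers.dropout` and `tf.layers.batch_normalization` accept, so one graph definition behaves correctly in training, evaluation, and prediction. A minimal standalone sketch of the pattern, illustrative only and not part of this patch (the `simple_model` function and the placeholder shape are assumptions):

    import tensorflow as tf

    def simple_model(x, mode):
      # One flag drives every train-only behaviour in the graph.
      training = (mode == tf.estimator.ModeKeys.TRAIN)
      net = tf.layers.dense(x, 128, activation=tf.nn.relu)
      # Dropout only fires when training=True; at eval or predict time
      # the layer passes activations through unchanged, with no manual
      # branching on the mode needed.
      net = tf.layers.dropout(net, rate=0.5, training=training)
      return tf.layers.dense(net, 10)

    x = tf.placeholder(tf.float32, [None, 784])
    logits = simple_model(x, tf.estimator.ModeKeys.TRAIN)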
diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py
index 98819b20bf..3ead8614b6 100644
--- a/tensorflow/examples/learn/mnist.py
+++ b/tensorflow/examples/learn/mnist.py
@@ -61,8 +61,10 @@ def conv_model(features, labels, mode):
 
   # Densely connected layer with 1024 neurons.
   h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)
-  if mode == tf.estimator.ModeKeys.TRAIN:
-    h_fc1 = tf.layers.dropout(h_fc1, rate=0.5)
+  h_fc1 = tf.layers.dropout(
+      h_fc1,
+      rate=0.5,
+      training=(mode == tf.estimator.ModeKeys.TRAIN))
 
   # Compute logits (1 per class) and compute loss.
   logits = tf.layers.dense(h_fc1, N_DIGITS, activation=None)
diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py
index 9542e55250..c00de932a8 100755
--- a/tensorflow/examples/learn/resnet.py
+++ b/tensorflow/examples/learn/resnet.py
@@ -53,6 +53,8 @@ def res_net_model(features, labels, mode):
   ndim = int(sqrt(input_shape[1]))
   x = tf.reshape(x, [-1, ndim, ndim, 1])
 
+  training = (mode == tf.estimator.ModeKeys.TRAIN)
+
   # First convolution expands to 64 channels
   with tf.variable_scope('conv_layer1'):
     net = tf.layers.conv2d(
@@ -60,7 +62,7 @@
         filters=64,
         kernel_size=7,
         activation=tf.nn.relu)
-    net = tf.layers.batch_normalization(net)
+    net = tf.layers.batch_normalization(net, training=training)
 
   # Max pool
   net = tf.layers.max_pooling2d(
@@ -88,7 +90,7 @@
             kernel_size=1,
             padding='valid',
             activation=tf.nn.relu)
-        conv = tf.layers.batch_normalization(conv)
+        conv = tf.layers.batch_normalization(conv, training=training)
 
       with tf.variable_scope(name + '/conv_bottleneck'):
         conv = tf.layers.conv2d(
@@ -97,7 +99,7 @@
             kernel_size=3,
             padding='same',
             activation=tf.nn.relu)
-        conv = tf.layers.batch_normalization(conv)
+        conv = tf.layers.batch_normalization(conv, training=training)
 
       # 1x1 convolution responsible for restoring dimension
       with tf.variable_scope(name + '/conv_out'):
@@ -108,7 +110,7 @@
             kernel_size=1,
             padding='valid',
             activation=tf.nn.relu)
-        conv = tf.layers.batch_normalization(conv)
+        conv = tf.layers.batch_normalization(conv, training=training)
 
       # shortcut connections that turn the network into its counterpart
       # residual function (identity shortcut)
@@ -154,7 +156,7 @@
   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
 
   # Create training op.
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if training:
     optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
     train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
     return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
--
cgit v1.2.3
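One caveat on the resnet.py change above: in TensorFlow 1.x, `tf.layers.batch_normalization(..., training=True)` creates moving-average update ops in the `tf.GraphKeys.UPDATE_OPS` collection, and those ops only run if they are explicitly attached to the train op; the patch does not add that dependency, so the moving statistics used at eval time would stay at their initial values. A sketch of the usual wiring (the `make_train_op` helper is illustrative, not part of the patch):

    import tensorflow as tf

    def make_train_op(loss):
      optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
      # batch_normalization(training=True) registers its moving-average
      # updates in UPDATE_OPS; without this control dependency the
      # statistics collected during training are never written back.
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      with tf.control_dependencies(update_ops):
        return optimizer.minimize(
            loss, global_step=tf.train.get_global_step())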