From d5a8fe3f6fb4287118a1f089e4edcbf279521604 Mon Sep 17 00:00:00 2001
From: Pete Warden
Date: Wed, 16 Aug 2017 17:58:47 -0700
Subject: Automated g4 rollback of changelist 165504820

PiperOrigin-RevId: 165525716
---
 WORKSPACE                                          |  10 -
 tensorflow/contrib/makefile/Makefile               |  17 +-
 .../contrib/makefile/download_dependencies.sh      |   2 -
 tensorflow/contrib/makefile/tf_op_files.txt        |   8 -
 tensorflow/core/kernels/BUILD                      |  14 -
 tensorflow/docs_src/tutorials/audio_recognition.md |  47 +--
 tensorflow/examples/android/AndroidManifest.xml    |  10 -
 tensorflow/examples/android/BUILD                  |   1 -
 tensorflow/examples/android/README.md              | 118 ++++---
 tensorflow/examples/android/download-models.gradle |   3 +-
 .../examples/android/res/drawable/border.xml       |  19 --
 .../android/res/layout/activity_speech.xml         |  55 ----
 .../examples/android/res/layout/list_text_item.xml |  25 --
 .../examples/android/res/values/base-strings.xml   |   1 -
 .../src/org/tensorflow/demo/RecognizeCommands.java | 186 -----------
 .../src/org/tensorflow/demo/SpeechActivity.java    | 353 ---------------------
 16 files changed, 64 insertions(+), 805 deletions(-)
 delete mode 100644 tensorflow/examples/android/res/drawable/border.xml
 delete mode 100644 tensorflow/examples/android/res/layout/activity_speech.xml
 delete mode 100644 tensorflow/examples/android/res/layout/list_text_item.xml
 delete mode 100644 tensorflow/examples/android/src/org/tensorflow/demo/RecognizeCommands.java
 delete mode 100644 tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java

diff --git a/WORKSPACE b/WORKSPACE
index 5e9b991fcc..959587387e 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -80,13 +80,3 @@ new_http_archive(
         "http://download.tensorflow.org/models/stylize_v1.zip",
     ],
 )
-
-new_http_archive(
-    name = "speech_commands",
-    build_file = "models.BUILD",
-    sha256 = "c3ec4fea3158eb111f1d932336351edfe8bd515bb6e87aad4f25dbad0a600d0c",
-    urls = [
-        "http://storage.googleapis.com/download.tensorflow.org/models/speech_commands_v0.01.zip",
-        "http://download.tensorflow.org/models/speech_commands_v0.01.zip",
-    ],
-)
diff --git a/tensorflow/contrib/makefile/Makefile b/tensorflow/contrib/makefile/Makefile
index 8d200e4c0a..2e45ddad54 100644
--- a/tensorflow/contrib/makefile/Makefile
+++ b/tensorflow/contrib/makefile/Makefile
@@ -74,7 +74,6 @@ HOST_INCLUDES := \
 -I$(MAKEFILE_DIR)/downloads/ \
 -I$(MAKEFILE_DIR)/downloads/eigen \
 -I$(MAKEFILE_DIR)/downloads/gemmlowp \
--I$(MAKEFILE_DIR)/downloads/fft2d \
 -I$(HOST_GENDIR)
 ifeq ($(HAS_GEN_HOST_PROTOC),true)
 	HOST_INCLUDES += -I$(MAKEFILE_DIR)/gen/protobuf-host/include
@@ -154,7 +153,6 @@ INCLUDES := \
 -I$(MAKEFILE_DIR)/downloads/ \
 -I$(MAKEFILE_DIR)/downloads/eigen \
 -I$(MAKEFILE_DIR)/downloads/gemmlowp \
--I$(MAKEFILE_DIR)/downloads/fft2d \
 -I$(PROTOGENDIR) \
 -I$(PBTGENDIR)
 ifeq ($(HAS_GEN_HOST_PROTOC),true)
@@ -235,7 +233,6 @@ ifeq ($(TARGET),ANDROID)
 	$(error "NDK_ROOT is not defined.")
 	endif
 	CXX := $(CC_PREFIX) $(NDK_ROOT)/toolchains/arm-linux-androideabi-4.9/prebuilt/$(OS_PATH)-x86_64/bin/arm-linux-androideabi-g++
-	CC := $(CC_PREFIX) $(NDK_ROOT)/toolchains/arm-linux-androideabi-4.9/prebuilt/$(OS_PATH)-x86_64/bin/arm-linux-androideabi-gcc
 	CXXFLAGS +=\
 --sysroot $(NDK_ROOT)/platforms/android-21/arch-arm \
 -Wno-narrowing \
@@ -243,6 +240,7 @@ ifeq ($(TARGET),ANDROID)
 -mfloat-abi=softfp \
 -mfpu=neon \
 -fPIE
+
 	INCLUDES = \
 -I$(NDK_ROOT)/sources/android/support/include \
 -I$(NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.9/include \
@@ -251,7 +249,6 @@ ifeq ($(TARGET),ANDROID)
 -I$(MAKEFILE_DIR)/downloads/ \
 -I$(MAKEFILE_DIR)/downloads/eigen \
 -I$(MAKEFILE_DIR)/downloads/gemmlowp \
--I$(MAKEFILE_DIR)/downloads/fft2d \
 -I$(MAKEFILE_DIR)/gen/protobuf/include \
 -I$(PROTOGENDIR) \
 -I$(PBTGENDIR)
@@ -504,7 +501,6 @@ $(wildcard tensorflow/core/grappler/clusters/single_machine.*)
 TF_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))
 # Add in any extra files that don't fit the patterns easily
 TF_CC_SRCS += tensorflow/core/platform/default/gpu_tracer.cc
-TF_CC_SRCS += tensorflow/contrib/makefile/downloads/fft2d/fftsg.c
 # Also include the op and kernel definitions.
 TF_CC_SRCS += $(shell cat $(MAKEFILE_DIR)/tf_op_files.txt)
 PBT_CC_SRCS := $(shell cat $(MAKEFILE_DIR)/tf_pb_text_files.txt)
@@ -527,8 +523,7 @@ tensorflow/core/kernels/hexagon/hexagon_remote_fused_graph_executor_build.cc
 endif
 
 # File names of the intermediate files target compilation generates.
-TF_CC_OBJS := $(addprefix $(OBJDIR), \
-$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(TF_CC_SRCS))))
+TF_CC_OBJS := $(addprefix $(OBJDIR), $(TF_CC_SRCS:.cc=.o))
 PBT_GEN_FILES := $(addprefix $(PBTGENDIR), $(PBT_CC_SRCS))
 PBT_OBJS := $(addprefix $(OBJDIR), $(PBT_CC_SRCS:.cc=.o))
 PROTO_CC_SRCS := $(addprefix $(PROTOGENDIR), $(PROTO_SRCS:.proto=.pb.cc))
@@ -566,14 +561,6 @@ $(OBJDIR)%.o: %.cc | $(PBT_GEN_FILES)
 	$(CXX) $(CXXFLAGS) $(DEPFLAGS) $(INCLUDES) -c $< -o $@
 	@mv -f $(DEPDIR)/$*.Td $(DEPDIR)/$*.d
 
-# Matches on plain C files.
-$(OBJDIR)%.o: %.c
-	@mkdir -p $(dir $@)
-	@mkdir -p $(dir $(DEPDIR)$*)
-	$(CXX) $(patsubst --std=c++11,--std=c99, $(CXXFLAGS)) -x c $(DEPFLAGS) \
-$(INCLUDES) -c $< -o $@
-	@mv -f $(DEPDIR)/$*.Td $(DEPDIR)/$*.d
-
 # Compiles C++ source files that have been generated by protoc.
 $(OBJDIR)%.pb.o: $(PROTOGENDIR)%.pb.cc
 	@mkdir -p $(dir $@)
diff --git a/tensorflow/contrib/makefile/download_dependencies.sh b/tensorflow/contrib/makefile/download_dependencies.sh
index bc766c9e06..f123111df8 100755
--- a/tensorflow/contrib/makefile/download_dependencies.sh
+++ b/tensorflow/contrib/makefile/download_dependencies.sh
@@ -24,7 +24,6 @@ GEMMLOWP_URL="$(grep -o 'http.*github.com/google/gemmlowp/.*tar\.gz' "${BZL_FILE
 GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz"
 PROTOBUF_URL="$(grep -o 'http.*github.com/google/protobuf/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v bazel-mirror | head -n1)"
 RE2_URL="$(grep -o 'http.*github.com/google/re2/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v bazel-mirror | head -n1)"
-FFT2D_URL="$(grep -o 'http.*fft\.tgz' "${BZL_FILE_PATH}" | grep -v bazel-mirror | head -n1)"
 
 # TODO(petewarden): Some new code in Eigen triggers a clang bug with iOS arm64,
 # so work around it by patching the source.
@@ -59,7 +58,6 @@ download_and_extract "${GEMMLOWP_URL}" "${DOWNLOADS_DIR}/gemmlowp"
 download_and_extract "${GOOGLETEST_URL}" "${DOWNLOADS_DIR}/googletest"
 download_and_extract "${PROTOBUF_URL}" "${DOWNLOADS_DIR}/protobuf"
 download_and_extract "${RE2_URL}" "${DOWNLOADS_DIR}/re2"
-download_and_extract "${FFT2D_URL}" "${DOWNLOADS_DIR}/fft2d"
 
 replace_by_sed 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
   "${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
diff --git a/tensorflow/contrib/makefile/tf_op_files.txt b/tensorflow/contrib/makefile/tf_op_files.txt
index a7f2be9790..9132a4344b 100644
--- a/tensorflow/contrib/makefile/tf_op_files.txt
+++ b/tensorflow/contrib/makefile/tf_op_files.txt
@@ -38,8 +38,6 @@ tensorflow/core/kernels/stack_ops.cc
 tensorflow/core/kernels/split_op.cc
 tensorflow/core/kernels/split_v_op.cc
 tensorflow/core/kernels/split_lib_cpu.cc
-tensorflow/core/kernels/spectrogram_op.cc
-tensorflow/core/kernels/spectrogram.cc
 tensorflow/core/kernels/sparse_to_dense_op.cc
 tensorflow/core/kernels/sparse_matmul_op.cc
 tensorflow/core/kernels/softsign_op.cc
@@ -102,10 +100,6 @@ tensorflow/core/kernels/mirror_pad_op_cpu_impl_2.cc
 tensorflow/core/kernels/mirror_pad_op_cpu_impl_3.cc
 tensorflow/core/kernels/mirror_pad_op_cpu_impl_4.cc
 tensorflow/core/kernels/mirror_pad_op_cpu_impl_5.cc
-tensorflow/core/kernels/mfcc_op.cc
-tensorflow/core/kernels/mfcc_mel_filterbank.cc
-tensorflow/core/kernels/mfcc_dct.cc
-tensorflow/core/kernels/mfcc.cc
 tensorflow/core/kernels/maxpooling_op.cc
 tensorflow/core/kernels/matmul_op.cc
 tensorflow/core/kernels/lrn_op.cc
@@ -123,7 +117,6 @@ tensorflow/core/kernels/fill_functor.cc
 tensorflow/core/kernels/fifo_queue.cc
 tensorflow/core/kernels/fake_quant_ops.cc
 tensorflow/core/kernels/example_parsing_ops.cc
-tensorflow/core/kernels/encode_wav_op.cc
 tensorflow/core/kernels/dynamic_stitch_op.cc
 tensorflow/core/kernels/dynamic_partition_op.cc
 tensorflow/core/kernels/decode_bmp_op.cc
@@ -131,7 +124,6 @@ tensorflow/core/kernels/depthtospace_op.cc
 tensorflow/core/kernels/spacetodepth_op.cc
 tensorflow/core/kernels/dense_update_ops.cc
 tensorflow/core/kernels/deep_conv2d.cc
-tensorflow/core/kernels/decode_wav_op.cc
 tensorflow/core/kernels/xsmm_conv2d.cc
 tensorflow/core/kernels/cwise_ops_common.cc
 tensorflow/core/kernels/cwise_op_tanh.cc
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index 1cc4213b16..05974f5a90 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -4272,9 +4272,6 @@ filegroup(
         "gemm_functors.h",
         "image_resizer_state.h",
         "maxpooling_op.h",
-        "mfcc.h",
-        "mfcc_dct.h",
-        "mfcc_mel_filterbank.h",
         "mirror_pad_op.h",
         "mirror_pad_op_cpu_impl.h",
         "pad_op.h",
@@ -4291,7 +4288,6 @@ filegroup(
         "softsign_op.h",
         "spacetobatch_functor.h",
         "spacetodepth_op.h",
-        "spectrogram.h",
         "tensor_array.h",
         "tile_functor.h",
         "tile_ops_cpu_impl.h",
@@ -4365,12 +4361,10 @@ filegroup(
         "cwise_op_squared_difference.cc",
         "cwise_op_sub.cc",
         "cwise_op_tanh.cc",
-        "decode_wav_op.cc",
         "deep_conv2d.cc",
         "deep_conv2d.h",
         "depthwise_conv_op.cc",
         "dynamic_partition_op.cc",
-        "encode_wav_op.cc",
         "fake_quant_ops.cc",
         "fifo_queue.cc",
         "fused_batch_norm_op.cc",
@@ -4399,10 +4393,6 @@ filegroup(
         "logging_ops.cc",
         "lrn_op.cc",
         "maxpooling_op.cc",
-        "mfcc.cc",
-        "mfcc_dct.cc",
-        "mfcc_mel_filterbank.cc",
-        "mfcc_op.cc",
         "mirror_pad_op.cc",
         "mirror_pad_op_cpu_impl_1.cc",
         "mirror_pad_op_cpu_impl_2.cc",
@@ -4438,8 +4428,6 @@
         "spacetobatch_op.cc",
         "spacetodepth_op.cc",
         "sparse_to_dense_op.cc",
-        "spectrogram.cc",
-        "spectrogram_op.cc",
         "stack_ops.cc",
         "string_join_op.cc",
         "summary_op.cc",
@@ -4576,8 +4564,6 @@ cc_library(
         "//tensorflow/core:android_tensorflow_lib_lite",
         "//tensorflow/core:protos_cc",
         "//third_party/eigen3",
-        "//third_party/fft2d:fft2d_headers",
-        "@fft2d//:fft2d",
         "@gemmlowp//:gemmlowp",
     ],
     alwayslink = 1,
diff --git a/tensorflow/docs_src/tutorials/audio_recognition.md b/tensorflow/docs_src/tutorials/audio_recognition.md
index 2caa3ec0d2..57d3ebb996 100644
--- a/tensorflow/docs_src/tutorials/audio_recognition.md
+++ b/tensorflow/docs_src/tutorials/audio_recognition.md
@@ -214,41 +214,6 @@ of the other .wav files in that same folder to see how well it does. The scores
 are between zero and one, and higher values mean the model is more confident in
 its prediction.
 
-## Running the Model in an Android App
-
-The easiest way to see how this model works in a real application is to download
-[the prebuilt Android demo
-applications](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#prebuilt-components)
-and install them on your phone. You'll see 'TF Speech' appear in your app list,
-and opening it will show you the same list of action words we've just trained
-our model on, starting with "Yes" and "No". Once you've given the app permission
-to use the microphone, you should be able to try saying those words and see them
-highlighted in the UI when the model recognizes one of them.
-
-You can also build this application yourself, since it's open source and
-[available as part of the TensorFlow repository on
-github](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#building-in-android-studio-using-the-tensorflow-aar-from-jcenter).
-By default it downloads [a pretrained model from
-tensorflow.org](http://download.tensorflow.org/models/speech_commands_v0.01.zip),
-but you can easily [replace it with a model you've trained
-yourself](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#install-model-files-optional).
-If you do this, you'll need to make sure that the constants in [the main
-SpeechActivity Java source
-file](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java)
-like `SAMPLE_RATE` and `SAMPLE_DURATION` match any changes you've made to the
-defaults while training. You'll also see that there's a [Java version of the
-RecognizeCommands
-module](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android/src/org/tensorflow/demo/RecognizeCommands.java)
-that's very similar to the C++ version in this tutorial. If you've tweaked
-parameters for that, you can also update them in SpeechActivity to get the same
-results as in your server testing.
-
-The demo app updates its UI list of results automatically based on the labels
-text file you copy into assets alongside your frozen graph, which means you can
-easily try out different models without needing to make any code changes. You
-will need to updaye `LABEL_FILENAME` and `MODEL_FILENAME` to point to the files
-you've added if you change the paths though.
-
 ## How does this Model Work?
 
 The architecture used in this tutorial is based on some described in the paper
@@ -376,14 +341,13 @@ aren't detected (high precision).
 The numbers from the tool give you an idea of how your model will perform in an
 application, and you can try tweaking the signal averaging parameters to tune it to give
 the kind of performance you want. To understand what the right parameters are for your application, you can look
-at generating an [ROC
-curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) to help
-you understand the tradeoffs.
+at generating an [ROC curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)
+to help you understand the tradeoffs.
 
 ## RecognizeCommands
 
-The streaming accuracy tool uses a simple decoder contained in a small C++ class
-called
+The streaming accuracy tool uses a simple decoder contained in a small
+C++ class called
 [RecognizeCommands](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/speech_commands/recognize_commands.h).
 This class is fed the output of running the TensorFlow model over time, it
 averages the signals, and returns information about a label when it has enough
@@ -516,8 +480,7 @@ variations in starting time in the training data, and is controlled with the
 `--time_shift_ms` flag, which defaults to 100ms. Increasing this value will
 provide more variation, but at the risk of cutting off important parts of the
 audio. A related way of augmenting the data with realistic distortions is by
-using [time stretching and pitch
-scaling](https://en.wikipedia.org/wiki/Audio_time_stretching_and_pitch_scaling),
+using [time stretching and pitch scaling](https://en.wikipedia.org/wiki/Audio_time_stretching_and_pitch_scaling),
 but that's outside the scope of this tutorial.
 
 ## Customizing the Model
diff --git a/tensorflow/examples/android/AndroidManifest.xml b/tensorflow/examples/android/AndroidManifest.xml
index bb75431a1f..9f229d8b9d 100644
--- a/tensorflow/examples/android/AndroidManifest.xml
+++ b/tensorflow/examples/android/AndroidManifest.xml
@@ -22,7 +22,6 @@
-
-
-
-
-
-
-
-
diff --git a/tensorflow/examples/android/BUILD b/tensorflow/examples/android/BUILD
index 2347e6b023..2d3b0911fc 100644
--- a/tensorflow/examples/android/BUILD
+++ b/tensorflow/examples/android/BUILD
@@ -93,7 +93,6 @@ filegroup(
     srcs = [
         "@inception5h//:model_files",
         "@mobile_ssd//:model_files",
-        "@speech_commands//:model_files",
         "@stylize//:model_files",
     ],
 )
diff --git a/tensorflow/examples/android/README.md b/tensorflow/examples/android/README.md
index 883f8e664f..f9881287cd 100644
--- a/tensorflow/examples/android/README.md
+++ b/tensorflow/examples/android/README.md
@@ -8,11 +8,10 @@ devices.
 The demos in this folder are designed to give straightforward samples of using
 TensorFlow in mobile applications.
 
-Inference is done using the [TensorFlow Android Inference
-Interface](../../../tensorflow/contrib/android), which may be built separately
-if you want a standalone library to drop into your existing application. Object
-tracking and efficient YUV -> RGB conversion are handled by
-`libtensorflow_demo.so`.
+Inference is done using the [TensorFlow Android Inference Interface](../../../tensorflow/contrib/android),
+which may be built separately if you want a standalone library to drop into your
+existing application. Object tracking and efficient YUV -> RGB conversion are
+handled by `libtensorflow_demo.so`.
 
 A device running Android 5.0 (API 21) or higher is required to run the demo due
 to the use of the camera2 API, although the native libraries themselves can run
@@ -34,12 +33,6 @@ on API >= 14 devices.
    Uses a model based on [A Learned Representation For Artistic
    Style](https://arxiv.org/abs/1610.07629) to restyle the camera preview
    image to that of a number of different artists.
-4. [TF
-   Speech](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java):
-   Runs a simple speech recognition model built by the [audio training
-   tutorial](https://www.tensorflow.org/tutorials/image_retraining). Listens
-   for a small set of words, and highlights them in the UI when they are
-   recognized.
@@ -58,22 +51,20 @@ for more details.
 
 ## Running the Demo
 
-Once the app is installed it can be started via the "TF Classify", "TF Detect",
-"TF Stylize", and "TF Speech" icons, which have the orange TensorFlow logo as
-their icon.
+Once the app is installed it can be started via the "TF Classify", "TF Detect"
+and "TF Stylize" icons, which have the orange TensorFlow logo as their icon.
 
 While running the activities, pressing the volume keys on your device will
-toggle debug visualizations on/off, rendering additional info to the screen that
-may be useful for development purposes.
+toggle debug visualizations on/off, rendering additional info to the screen
+that may be useful for development purposes.
 
 ## Building in Android Studio using the TensorFlow AAR from JCenter
 
 The simplest way to compile the demo app yourself, and try out changes to the
-project code is to use AndroidStudio. Simply set this `android` directory as the
-project root.
+project code is to use AndroidStudio. Simply set this `android` directory as the project root.
 
-Then edit the `build.gradle` file and change the value of `nativeBuildSystem` to
-`'none'` so that the project is built in the simplest way possible:
+Then edit the `build.gradle` file and change the value of `nativeBuildSystem`
+to `'none'` so that the project is built in the simplest way possible:
 
 ```None
 def nativeBuildSystem = 'none'
@@ -86,8 +77,8 @@ Note: Currently, in this build mode, YUV -> RGB is done using a less efficient
 Java implementation, and object tracking is not available in the "TF Detect"
 activity. Setting the build system to `'cmake'` currently only builds
 `libtensorflow_demo.so`, which provides fast YUV -> RGB conversion and object
-tracking, while still acquiring TensorFlow support via the downloaded AAR, so it
-may be a lightweight way to enable these features.
+tracking, while still acquiring TensorFlow support via the downloaded AAR, so
+it may be a lightweight way to enable these features.
 
 For any project that does not include custom low level TensorFlow code, this is
 likely sufficient.
@@ -113,51 +104,50 @@ protobuf compilation.
 
 NOTE: Bazel does not currently support building for Android on Windows. Full
 support for gradle/cmake builds is coming soon, but in the meantime we suggest
-that Windows users download the [prebuilt
-binaries](https://ci.tensorflow.org/view/Nightly/job/nightly-android/) instead.
+that Windows users download the
+[prebuilt binaries](https://ci.tensorflow.org/view/Nightly/job/nightly-android/)
+instead.
 
 ##### Install Bazel and Android Prerequisites
 
-Bazel is the primary build system for TensorFlow. To build with Bazel, it and
-the Android NDK and SDK must be installed on your system.
-
-1. Install the latest version of Bazel as per the instructions [on the Bazel
-   website](https://bazel.build/versions/master/docs/install.html).
-2. The Android NDK is required to build the native (C/C++) TensorFlow code. The
-   current recommended version is 12b, which may be found
-   [here](https://developer.android.com/ndk/downloads/older_releases.html#ndk-12b-downloads).
-3. The Android SDK and build tools may be obtained
-   [here](https://developer.android.com/tools/revisions/build-tools.html), or
-   alternatively as part of [Android
-   Studio](https://developer.android.com/studio/index.html). Build tools API >=
-   23 is required to build the TF Android demo (though it will run on API >= 21
-   devices).
+Bazel is the primary build system for TensorFlow. To build with Bazel,
+it and the Android NDK and SDK must be installed on your system.
+
+1. Install the latest version of Bazel as per the instructions [on the Bazel website](https://bazel.build/versions/master/docs/install.html).
+2. The Android NDK is required to build the native (C/C++) TensorFlow code.
+   The current recommended version is 12b, which may be found
+   [here](https://developer.android.com/ndk/downloads/older_releases.html#ndk-12b-downloads).
+3. The Android SDK and build tools may be obtained
+   [here](https://developer.android.com/tools/revisions/build-tools.html),
+   or alternatively as part of
+   [Android Studio](https://developer.android.com/studio/index.html). Build
+   tools API >= 23 is required to build the TF Android demo (though it will
+   run on API >= 21 devices).
 
 ##### Edit WORKSPACE
 
-The Android entries in
-[`/WORKSPACE`](../../../WORKSPACE#L19-L36) must be uncommented
-with the paths filled in appropriately depending on where you installed the NDK
-and SDK. Otherwise an error such as: "The external label
-'//external:android/sdk' is not bound to anything" will be reported.
+The Android entries in [`/WORKSPACE`](../../../WORKSPACE#L19-L36)
+must be uncommented with the paths filled in appropriately depending on where
+you installed the NDK and SDK. Otherwise an error such as:
+"The external label '//external:android/sdk' is not bound to anything" will
+be reported.
 
-Also edit the API levels for the SDK in WORKSPACE to the highest level you have
-installed in your SDK. This must be >= 23 (this is completely independent of the
-API level of the demo, which is defined in AndroidManifest.xml). The NDK API
-level may remain at 14.
+Also edit the API levels for the SDK in WORKSPACE to the highest level you
+have installed in your SDK. This must be >= 23 (this is completely independent
+of the API level of the demo, which is defined in AndroidManifest.xml).
+The NDK API level may remain at 14.
 
 ##### Install Model Files (optional)
 
-The TensorFlow `GraphDef`s that contain the model definitions and weights are
-not packaged in the repo because of their size. They are downloaded
+The TensorFlow `GraphDef`s that contain the model definitions and weights
+are not packaged in the repo because of their size. They are downloaded
 automatically and packaged with the APK by Bazel via a new_http_archive defined
-in `WORKSPACE` during the build process, and by Gradle via
-download-models.gradle.
+in `WORKSPACE` during the build process, and by Gradle via download-models.gradle.
 
-**Optional**: If you wish to place the models in your assets manually, remove
-all of the `model_files` entries from the `assets` list in `tensorflow_demo`
-found in the `[BUILD](BUILD)` file. Then download and extract the archives
-yourself to the `assets` directory in the source tree:
+**Optional**: If you wish to place the models in your assets manually,
+remove all of the `model_files` entries from the `assets`
+list in `tensorflow_demo` found in the `[BUILD](BUILD)` file. Then download
+and extract the archives yourself to the `assets` directory in the source tree:
 
 ```bash
 BASE_URL=https://storage.googleapis.com/download.tensorflow.org/models
@@ -172,23 +162,27 @@ This will extract the models and their associated metadata files to the local
 assets/ directory.
 
 If you are using Gradle, make sure to remove download-models.gradle reference
-from build.gradle after your manually download models; otherwise gradle might
-download models again and overwrite your models.
+from build.gradle after your manually download models; otherwise gradle
+might download models again and overwrite your models.
 
 ##### Build
 
-After editing your WORKSPACE file to update the SDK/NDK configuration, you may
-build the APK. Run this from your workspace root:
+After editing your WORKSPACE file to update the SDK/NDK configuration,
+you may build the APK. Run this from your workspace root:
 
 ```bash
 bazel build -c opt //tensorflow/examples/android:tensorflow_demo
 ```
 
+If you get build errors about protocol buffers, run
+`git submodule update --init` and make sure that you've modified your WORKSPACE
+file as instructed, then try building again.
+
 ##### Install
 
-Make sure that adb debugging is enabled on your Android 5.0 (API 21) or later
-device, then after building use the following command from your workspace root
-to install the APK:
+Make sure that adb debugging is enabled on your Android 5.0 (API 21) or
+later device, then after building use the following command from your workspace
+root to install the APK:
 
 ```bash
 adb install -r bazel-bin/tensorflow/examples/android/tensorflow_demo.apk
diff --git a/tensorflow/examples/android/download-models.gradle b/tensorflow/examples/android/download-models.gradle
index 0e2cf65f53..a19ca36d7f 100644
--- a/tensorflow/examples/android/download-models.gradle
+++ b/tensorflow/examples/android/download-models.gradle
@@ -11,8 +11,7 @@
 // LINT.IfChange
 def models = ['inception5h.zip',
               'object_detection/ssd_mobilenet_v1_android_export.zip',
-              'stylize_v1.zip',
-              'speech_commands_conv_actions.zip']
+              'stylize_v1.zip']
 // LINT.ThenChange(//tensorflow/examples/android/BUILD)
 
 // Root URL for model archives
diff --git a/tensorflow/examples/android/res/drawable/border.xml b/tensorflow/examples/android/res/drawable/border.xml
deleted file mode 100644
index dd1d64d1d6..0000000000
--- a/tensorflow/examples/android/res/drawable/border.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
-
-
diff --git a/tensorflow/examples/android/res/layout/activity_speech.xml b/tensorflow/examples/android/res/layout/activity_speech.xml
deleted file mode 100644
index 2fe1338da5..0000000000
--- a/tensorflow/examples/android/res/layout/activity_speech.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-
-
-
-
-
-
-
-