Diffstat (limited to 'tensorflow/examples/android/jni')
-rwxr-xr-x  tensorflow/examples/android/jni/__init__.py          0
-rw-r--r--  tensorflow/examples/android/jni/imageutils_jni.cc  122
-rw-r--r--  tensorflow/examples/android/jni/jni_utils.cc       144
-rw-r--r--  tensorflow/examples/android/jni/jni_utils.h         30
-rwxr-xr-x  tensorflow/examples/android/jni/libpthread.so      bin 0 -> 14096 bytes
-rwxr-xr-x  tensorflow/examples/android/jni/rgb2yuv.cc          89
-rwxr-xr-x  tensorflow/examples/android/jni/rgb2yuv.h           23
-rw-r--r--  tensorflow/examples/android/jni/tensorflow_jni.cc  253
-rw-r--r--  tensorflow/examples/android/jni/tensorflow_jni.h    36
-rw-r--r--  tensorflow/examples/android/jni/yuv2rgb.cc         161
-rw-r--r--  tensorflow/examples/android/jni/yuv2rgb.h           37
11 files changed, 895 insertions(+), 0 deletions(-)
diff --git a/tensorflow/examples/android/jni/__init__.py b/tensorflow/examples/android/jni/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tensorflow/examples/android/jni/__init__.py
diff --git a/tensorflow/examples/android/jni/imageutils_jni.cc b/tensorflow/examples/android/jni/imageutils_jni.cc
new file mode 100644
index 0000000000..a1f88fb867
--- /dev/null
+++ b/tensorflow/examples/android/jni/imageutils_jni.cc
@@ -0,0 +1,122 @@
+// This file binds the native image utility functions to the Java class
+// that exposes them.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/examples/android/jni/rgb2yuv.h"
+#include "tensorflow/examples/android/jni/yuv2rgb.h"
+
+#define IMAGEUTILS_METHOD(METHOD_NAME) \
+ Java_org_tensorflow_demo_env_ImageUtils_##METHOD_NAME // NOLINT
+
+using namespace tensorflow;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertYUV420SPToARGB8888)(
+ JNIEnv* env, jclass clazz, jbyteArray input, jintArray output,
+ jint width, jint height, jboolean halfSize);
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertYUV420SPToRGB565)(
+ JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output,
+ jint width, jint height);
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertARGB8888ToYUV420SP)(
+ JNIEnv* env, jclass clazz, jintArray input, jbyteArray output,
+ jint width, jint height);
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertRGB565ToYUV420SP)(
+ JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output,
+ jint width, jint height);
+
+#ifdef __cplusplus
+}
+#endif
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertYUV420SPToARGB8888)(
+ JNIEnv* env, jclass clazz, jbyteArray input, jintArray output,
+ jint width, jint height, jboolean halfSize) {
+ jboolean inputCopy = JNI_FALSE;
+ jbyte* const i = env->GetByteArrayElements(input, &inputCopy);
+
+ jboolean outputCopy = JNI_FALSE;
+ jint* const o = env->GetIntArrayElements(output, &outputCopy);
+
+ if (halfSize) {
+ ConvertYUV420SPToARGB8888HalfSize(reinterpret_cast<uint8*>(i),
+ reinterpret_cast<uint32*>(o),
+ width, height);
+ } else {
+ ConvertYUV420SPToARGB8888(reinterpret_cast<uint8*>(i),
+ reinterpret_cast<uint8*>(i) + width * height,
+ reinterpret_cast<uint32*>(o),
+ width, height);
+ }
+
+ env->ReleaseByteArrayElements(input, i, JNI_ABORT);
+ env->ReleaseIntArrayElements(output, o, 0);
+}
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertYUV420SPToRGB565)(
+ JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output,
+ jint width, jint height) {
+ jboolean inputCopy = JNI_FALSE;
+ jbyte* const i = env->GetByteArrayElements(input, &inputCopy);
+
+ jboolean outputCopy = JNI_FALSE;
+ jbyte* const o = env->GetByteArrayElements(output, &outputCopy);
+
+ ConvertYUV420SPToRGB565(reinterpret_cast<uint8*>(i),
+ reinterpret_cast<uint16*>(o),
+ width, height);
+
+ env->ReleaseByteArrayElements(input, i, JNI_ABORT);
+ env->ReleaseByteArrayElements(output, o, 0);
+}
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertARGB8888ToYUV420SP)(
+ JNIEnv* env, jclass clazz, jintArray input, jbyteArray output,
+ jint width, jint height) {
+ jboolean inputCopy = JNI_FALSE;
+ jint* const i = env->GetIntArrayElements(input, &inputCopy);
+
+ jboolean outputCopy = JNI_FALSE;
+ jbyte* const o = env->GetByteArrayElements(output, &outputCopy);
+
+ ConvertARGB8888ToYUV420SP(reinterpret_cast<uint32*>(i),
+ reinterpret_cast<uint8*>(o),
+ width, height);
+
+ env->ReleaseIntArrayElements(input, i, JNI_ABORT);
+ env->ReleaseByteArrayElements(output, o, 0);
+}
+
+JNIEXPORT void JNICALL
+IMAGEUTILS_METHOD(convertRGB565ToYUV420SP)(
+ JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output,
+ jint width, jint height) {
+ jboolean inputCopy = JNI_FALSE;
+ jbyte* const i = env->GetByteArrayElements(input, &inputCopy);
+
+ jboolean outputCopy = JNI_FALSE;
+ jbyte* const o = env->GetByteArrayElements(output, &outputCopy);
+
+ ConvertRGB565ToYUV420SP(reinterpret_cast<uint16*>(i),
+ reinterpret_cast<uint8*>(o),
+ width, height);
+
+ env->ReleaseByteArrayElements(input, i, JNI_ABORT);
+ env->ReleaseByteArrayElements(output, o, 0);
+}
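Each binding above follows the same JNI buffer discipline: obtain the array elements (JNI may pin them or hand back a copy), run the native converter, then release the read-only input with JNI_ABORT (discard, no copy-back) and the output with mode 0 (copy back and free). A minimal standalone sketch of that pattern, using a hypothetical com.example.Demo class rather than anything in this change:

```cpp
#include <jni.h>

// Sketch of the Get/Release pattern used by every binding above,
// bound to a hypothetical Java class com.example.Demo.
extern "C" JNIEXPORT void JNICALL
Java_com_example_Demo_invert(JNIEnv* env, jclass clazz, jbyteArray input,
                             jbyteArray output, jint size) {
  jboolean input_copy = JNI_FALSE;  // Set by JNI if it handed back a copy.
  jbyte* const in = env->GetByteArrayElements(input, &input_copy);

  jboolean output_copy = JNI_FALSE;
  jbyte* const out = env->GetByteArrayElements(output, &output_copy);

  for (int k = 0; k < size; ++k) {
    out[k] = static_cast<jbyte>(~in[k]);  // Stand-in for a real converter.
  }

  // JNI_ABORT: release without copy-back -- the input was only read.
  env->ReleaseByteArrayElements(input, in, JNI_ABORT);
  // Mode 0: copy changes back to the Java array (if copied) and release.
  env->ReleaseByteArrayElements(output, out, 0);
}
```

Releasing the input with JNI_ABORT avoids a pointless write-back when JNI returned a copy; for a pinned array it simply unpins.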
diff --git a/tensorflow/examples/android/jni/jni_utils.cc b/tensorflow/examples/android/jni/jni_utils.cc
new file mode 100644
index 0000000000..3fffc19cb6
--- /dev/null
+++ b/tensorflow/examples/android/jni/jni_utils.cc
@@ -0,0 +1,144 @@
+#include "tensorflow/examples/android/jni/jni_utils.h"
+
+#include <android/asset_manager.h>
+#include <android/asset_manager_jni.h>
+#include <jni.h>
+#include <stdlib.h>
+
+#include <string>
+#include <vector>
+#include <fstream>
+#include <sstream>
+
+#include "tensorflow/core/platform/logging.h"
+#include "google/protobuf/src/google/protobuf/io/zero_copy_stream_impl.h"
+#include "google/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.h"
+#include "google/protobuf/src/google/protobuf/io/coded_stream.h"
+#include "google/protobuf/src/google/protobuf/message_lite.h"
+
+static const char* const ASSET_PREFIX = "file:///android_asset/";
+
+namespace {
+class IfstreamInputStream : public ::google::protobuf::io::CopyingInputStream {
+ public:
+ explicit IfstreamInputStream(const std::string& file_name)
+ : ifs_(file_name.c_str(), std::ios::in | std::ios::binary) {}
+ ~IfstreamInputStream() { ifs_.close(); }
+
+ int Read(void* buffer, int size) {
+ if (!ifs_) {
+ return -1;
+ }
+ ifs_.read(static_cast<char*>(buffer), size);
+ return ifs_.gcount();
+ }
+
+ private:
+ std::ifstream ifs_;
+};
+} // namespace
+
+bool PortableReadFileToProto(const std::string& file_name,
+ ::google::protobuf::MessageLite* proto) {
+ ::google::protobuf::io::CopyingInputStreamAdaptor stream(
+ new IfstreamInputStream(file_name));
+ stream.SetOwnsCopyingStream(true);
+ // TODO(jiayq): the following coded stream is for debugging purposes to allow
+ // one to parse arbitrarily large messages for MessageLite. One most likely
+ // doesn't want to put protobufs larger than 64MB on Android, so we should
+  // eventually remove this and fail loudly when a large protobuf is passed in.
+ ::google::protobuf::io::CodedInputStream coded_stream(&stream);
+ // Total bytes hard limit / warning limit are set to 1GB and 512MB
+ // respectively.
+ coded_stream.SetTotalBytesLimit(1024LL << 20, 512LL << 20);
+ return proto->ParseFromCodedStream(&coded_stream);
+}
+
+bool IsAsset(const char* const filename) {
+ return strstr(filename, ASSET_PREFIX) == filename;
+}
+
+void ReadFileToProto(AAssetManager* const asset_manager,
+ const char* const filename,
+ google::protobuf::MessageLite* message) {
+ if (!IsAsset(filename)) {
+ VLOG(0) << "Opening file: " << filename;
+ CHECK(PortableReadFileToProto(filename, message));
+ return;
+ }
+
+ CHECK_NOTNULL(asset_manager);
+
+ const char* const asset_filename = filename + strlen(ASSET_PREFIX);
+ AAsset* asset = AAssetManager_open(asset_manager,
+ asset_filename,
+ AASSET_MODE_STREAMING);
+ CHECK_NOTNULL(asset);
+
+ off_t start;
+ off_t length;
+ const int fd = AAsset_openFileDescriptor(asset, &start, &length);
+
+ if (fd >= 0) {
+    // If it has a file descriptor, it can be memory-mapped directly
+    // from the APK.
+ VLOG(0) << "Opening asset " << asset_filename
+ << " from disk with zero-copy.";
+ google::protobuf::io::FileInputStream is(fd);
+ google::protobuf::io::LimitingInputStream lis(&is, start + length);
+ lis.Skip(start);
+ CHECK(message->ParseFromZeroCopyStream(&lis));
+ is.Close();
+ } else {
+ // It may be compressed, in which case we have to uncompress
+ // it to memory first.
+ VLOG(0) << "Opening asset " << asset_filename
+ << " from disk with copy.";
+ const off_t data_size = AAsset_getLength(asset);
+ const void* const memory = AAsset_getBuffer(asset);
+ CHECK(message->ParseFromArray(memory, data_size));
+ }
+ AAsset_close(asset);
+}
+
+void ReadFileToString(AAssetManager* const asset_manager,
+ const char* const filename, std::string* str) {
+ if (!IsAsset(filename)) {
+ VLOG(0) << "Opening file: " << filename;
+ std::ifstream t(filename);
+ std::string tmp((std::istreambuf_iterator<char>(t)),
+ std::istreambuf_iterator<char>());
+ tmp.swap(*str);
+ t.close();
+ return;
+ }
+
+ CHECK_NOTNULL(asset_manager);
+ const char* const asset_filename = filename + strlen(ASSET_PREFIX);
+ AAsset* asset = AAssetManager_open(asset_manager,
+ asset_filename,
+ AASSET_MODE_STREAMING);
+ CHECK_NOTNULL(asset);
+ VLOG(0) << "Opening asset " << asset_filename << " from disk with copy.";
+ const off_t data_size = AAsset_getLength(asset);
+ const char* memory = reinterpret_cast<const char*>(AAsset_getBuffer(asset));
+
+ std::string tmp(memory, memory + data_size);
+ tmp.swap(*str);
+ AAsset_close(asset);
+}
+
+void ReadFileToVector(AAssetManager* const asset_manager,
+ const char* const filename,
+ std::vector<std::string>* str_vector) {
+ std::string labels_string;
+ ReadFileToString(asset_manager, filename, &labels_string);
+ std::istringstream ifs(labels_string);
+ str_vector->clear();
+ std::string label;
+ while (std::getline(ifs, label)) {
+ str_vector->push_back(label);
+ }
+ VLOG(0) << "Read " << str_vector->size() << " values from " << filename;
+}
+
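The routing between APK assets and the plain filesystem is driven entirely by the ASSET_PREFIX check: paths beginning with file:///android_asset/ go through the AAssetManager (zero-copy when a usable file descriptor is available), and anything else falls back to PortableReadFileToProto. A hedged usage sketch; the file names are hypothetical, and GraphDef is assumed to come in via the TensorFlow headers already used by tensorflow_jni.cc:

```cpp
#include <android/asset_manager.h>

#include "tensorflow/core/public/session.h"  // Brings in tensorflow::GraphDef.
#include "tensorflow/examples/android/jni/jni_utils.h"

// Hypothetical loader showing how the ASSET_PREFIX routing is exercised.
void LoadGraph(AAssetManager* const asset_manager) {
  tensorflow::GraphDef graph;

  // Asset path: resolved inside the APK via AAssetManager; zero-copy if
  // the asset is stored uncompressed (a file descriptor can be opened).
  ReadFileToProto(asset_manager, "file:///android_asset/model.pb", &graph);

  // Plain path: handled by PortableReadFileToProto on the filesystem.
  ReadFileToProto(asset_manager, "/data/local/tmp/model.pb", &graph);
}
```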
diff --git a/tensorflow/examples/android/jni/jni_utils.h b/tensorflow/examples/android/jni/jni_utils.h
new file mode 100644
index 0000000000..9bd8d2c21f
--- /dev/null
+++ b/tensorflow/examples/android/jni/jni_utils.h
@@ -0,0 +1,30 @@
+#ifndef ORG_TENSORFLOW_JNI_JNI_UTILS_H_ // NOLINT
+#define ORG_TENSORFLOW_JNI_JNI_UTILS_H_ // NOLINT
+
+#include <jni.h>
+#include <string>
+#include <vector>
+
+#include "tensorflow/core/platform/port.h"
+
+namespace google {
+namespace protobuf {
+class MessageLite;
+}  // namespace protobuf
+}  // namespace google
+
+class AAssetManager;
+
+bool PortableReadFileToProto(const std::string& file_name,
+ ::google::protobuf::MessageLite* proto);
+
+void ReadFileToProto(AAssetManager* const asset_manager,
+ const char* const filename, google::protobuf::MessageLite* message);
+
+void ReadFileToString(AAssetManager* const asset_manager,
+ const char* const filename, std::string* str);
+
+void ReadFileToVector(AAssetManager* const asset_manager,
+ const char* const filename, std::vector<std::string>* str_vector);
+
+#endif // ORG_TENSORFLOW_JNI_JNI_UTILS_H_
diff --git a/tensorflow/examples/android/jni/libpthread.so b/tensorflow/examples/android/jni/libpthread.so
new file mode 100755
index 0000000000..7992d0de4c
--- /dev/null
+++ b/tensorflow/examples/android/jni/libpthread.so
Binary files differ
diff --git a/tensorflow/examples/android/jni/rgb2yuv.cc b/tensorflow/examples/android/jni/rgb2yuv.cc
new file mode 100755
index 0000000000..428f311eb8
--- /dev/null
+++ b/tensorflow/examples/android/jni/rgb2yuv.cc
@@ -0,0 +1,89 @@
+// These utility functions allow for the conversion of RGB data to YUV data.
+
+#include "tensorflow/examples/android/jni/rgb2yuv.h"
+
+#include "tensorflow/core/platform/port.h"
+
+using namespace tensorflow;
+
+static inline void WriteYUV(const int x, const int y, const int width,
+ const int r8, const int g8, const int b8,
+ uint8* const pY,
+ uint8* const pUV) {
+ // Using formulas from http://msdn.microsoft.com/en-us/library/ms893078
+ *pY = ((66 * r8 + 129 * g8 + 25 * b8 + 128) >> 8) + 16;
+
+ // Odd widths get rounded up so that UV blocks on the side don't get cut off.
+ const int blocks_per_row = (width + 1) / 2;
+
+ // 2 bytes per UV block
+ const int offset = 2 * (((y / 2) * blocks_per_row + (x / 2)));
+
+ // U and V are the average values of all 4 pixels in the block.
+ if (!(x & 1) && !(y & 1)) {
+ // Explicitly clear the block if this is the first pixel in it.
+ pUV[offset] = 0;
+ pUV[offset + 1] = 0;
+ }
+
+ // V (with divide by 4 factored in)
+#ifdef __APPLE__
+ const int u_offset = 0;
+ const int v_offset = 1;
+#else
+ const int u_offset = 1;
+ const int v_offset = 0;
+#endif
+ pUV[offset + v_offset] += ((112 * r8 - 94 * g8 - 18 * b8 + 128) >> 10) + 32;
+
+ // U (with divide by 4 factored in)
+ pUV[offset + u_offset] += ((-38 * r8 - 74 * g8 + 112 * b8 + 128) >> 10) + 32;
+}
+
+void ConvertARGB8888ToYUV420SP(const uint32* const input, uint8* const output,
+ int width, int height) {
+ uint8* pY = output;
+ uint8* pUV = output + (width * height);
+ const uint32* in = input;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ const uint32 rgb = *in++;
+#ifdef __APPLE__
+ const int nB = (rgb >> 8) & 0xFF;
+ const int nG = (rgb >> 16) & 0xFF;
+ const int nR = (rgb >> 24) & 0xFF;
+#else
+ const int nR = (rgb >> 16) & 0xFF;
+ const int nG = (rgb >> 8) & 0xFF;
+ const int nB = rgb & 0xFF;
+#endif
+ WriteYUV(x, y, width, nR, nG, nB, pY++, pUV);
+ }
+ }
+}
+
+void ConvertRGB565ToYUV420SP(const uint16* const input, uint8* const output,
+ const int width, const int height) {
+ uint8* pY = output;
+ uint8* pUV = output + (width * height);
+ const uint16* in = input;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ const uint32 rgb = *in++;
+
+ const int r5 = ((rgb >> 11) & 0x1F);
+ const int g6 = ((rgb >> 5) & 0x3F);
+ const int b5 = (rgb & 0x1F);
+
+ // Shift left, then fill in the empty low bits with a copy of the high
+ // bits so we can stretch across the entire 0 - 255 range.
+ const int r8 = r5 << 3 | r5 >> 2;
+ const int g8 = g6 << 2 | g6 >> 4;
+ const int b8 = b5 << 3 | b5 >> 2;
+
+ WriteYUV(x, y, width, r8, g8, b8, pY++, pUV);
+ }
+ }
+}
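The final step of ConvertRGB565ToYUV420SP stretches each 5- or 6-bit channel to 8 bits by shifting left and replicating the high bits into the vacated low bits, so that a full-scale 0x1F or 0x3F maps to exactly 0xFF rather than 0xF8 or 0xFC. A small self-contained check of that expansion:

```cpp
#include <cassert>

// Expands a 5-bit channel (0..31) to 8 bits (0..255) by shifting left and
// replicating the high bits into the vacated low bits.
inline int Expand5To8(int c5) { return (c5 << 3) | (c5 >> 2); }
// Same for a 6-bit channel (0..63).
inline int Expand6To8(int c6) { return (c6 << 2) | (c6 >> 4); }

int main() {
  // A plain shift would top out at 0xF8/0xFC; bit replication reaches 0xFF,
  // so pure white in RGB565 stays pure white after expansion.
  assert(Expand5To8(0x1F) == 0xFF);
  assert(Expand6To8(0x3F) == 0xFF);
  assert(Expand5To8(0) == 0 && Expand6To8(0) == 0);
  return 0;
}
```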
diff --git a/tensorflow/examples/android/jni/rgb2yuv.h b/tensorflow/examples/android/jni/rgb2yuv.h
new file mode 100755
index 0000000000..e5eb5aa419
--- /dev/null
+++ b/tensorflow/examples/android/jni/rgb2yuv.h
@@ -0,0 +1,23 @@
+#ifndef ORG_TENSORFLOW_JNI_IMAGEUTILS_RGB2YUV_H_
+#define ORG_TENSORFLOW_JNI_IMAGEUTILS_RGB2YUV_H_
+
+#include "tensorflow/core/platform/port.h"
+
+using namespace tensorflow;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void ConvertARGB8888ToYUV420SP(const uint32* const input, uint8* const output,
+ int width, int height);
+
+void ConvertRGB565ToYUV420SP(const uint16* const input,
+ uint8* const output,
+ const int width, const int height);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ORG_TENSORFLOW_JNI_IMAGEUTILS_RGB2YUV_H_
diff --git a/tensorflow/examples/android/jni/tensorflow_jni.cc b/tensorflow/examples/android/jni/tensorflow_jni.cc
new file mode 100644
index 0000000000..39d0bb1249
--- /dev/null
+++ b/tensorflow/examples/android/jni/tensorflow_jni.cc
@@ -0,0 +1,253 @@
+#include "tensorflow/examples/android/jni/tensorflow_jni.h"
+
+#include <android/asset_manager.h>
+#include <android/asset_manager_jni.h>
+#include <android/bitmap.h>
+
+#include <jni.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <queue>
+#include <sstream>
+#include <string>
+
+#include "tensorflow/core/framework/types.pb.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/public/env.h"
+#include "tensorflow/core/public/session.h"
+#include "tensorflow/core/public/tensor.h"
+#include "tensorflow/examples/android/jni/jni_utils.h"
+
+// Global variables that hold the Tensorflow classifier.
+static std::unique_ptr<tensorflow::Session> session;
+
+static std::vector<std::string> g_label_strings;
+static bool g_compute_graph_initialized = false;
+//static mutex g_compute_graph_mutex(base::LINKER_INITIALIZED);
+
+static int g_tensorflow_input_size; // The image size for the mognet input.
+static int g_image_mean; // The image mean.
+
+using namespace tensorflow;
+
+JNIEXPORT jint JNICALL
+TENSORFLOW_METHOD(initializeTensorflow)(
+ JNIEnv* env, jobject thiz, jobject java_asset_manager,
+ jstring model, jstring labels,
+ jint num_classes, jint mognet_input_size, jint image_mean) {
+ //MutexLock input_lock(&g_compute_graph_mutex);
+ if (g_compute_graph_initialized) {
+    LOG(INFO) << "Compute graph already loaded. Skipping.";
+ return 0;
+ }
+
+ const char* const model_cstr = env->GetStringUTFChars(model, NULL);
+ const char* const labels_cstr = env->GetStringUTFChars(labels, NULL);
+
+ g_tensorflow_input_size = mognet_input_size;
+ g_image_mean = image_mean;
+
+ LOG(INFO) << "Loading Tensorflow.";
+
+ LOG(INFO) << "Making new SessionOptions.";
+ tensorflow::SessionOptions options;
+ tensorflow::ConfigProto& config = options.config;
+ LOG(INFO) << "Got config, " << config.device_count_size() << " devices";
+
+ session.reset(tensorflow::NewSession(options));
+ LOG(INFO) << "Session created.";
+
+ tensorflow::GraphDef tensorflow_graph;
+ LOG(INFO) << "Graph created.";
+
+ AAssetManager* const asset_manager =
+ AAssetManager_fromJava(env, java_asset_manager);
+ LOG(INFO) << "Acquired AssetManager.";
+
+ LOG(INFO) << "Reading file to proto: " << model_cstr;
+ ReadFileToProto(asset_manager, model_cstr, &tensorflow_graph);
+
+ LOG(INFO) << "Creating session.";
+ tensorflow::Status s = session->Create(tensorflow_graph);
+ if (!s.ok()) {
+ LOG(ERROR) << "Could not create Tensorflow Graph: " << s;
+ return -1;
+ }
+
+ // Clear the proto to save memory space.
+ tensorflow_graph.Clear();
+ LOG(INFO) << "Tensorflow graph loaded from: " << model_cstr;
+
+ // Read the label list
+ ReadFileToVector(asset_manager, labels_cstr, &g_label_strings);
+ LOG(INFO) << g_label_strings.size() << " label strings loaded from: "
+ << labels_cstr;
+ g_compute_graph_initialized = true;
+
+ return 0;
+}
+
+namespace {
+typedef struct {
+ uint8 red;
+ uint8 green;
+ uint8 blue;
+ uint8 alpha;
+} RGBA;
+} // namespace
+
+// Returns the top N confidence values over threshold in the provided vector,
+// sorted by confidence in descending order.
+static void GetTopN(
+ const Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>,
+ Eigen::Aligned>& prediction,
+ const int num_results, const float threshold,
+ std::vector<std::pair<float, int> >* top_results) {
+ // Will contain top N results in ascending order.
+ std::priority_queue<std::pair<float, int>,
+ std::vector<std::pair<float, int> >,
+ std::greater<std::pair<float, int> > > top_result_pq;
+
+ const int count = prediction.size();
+ for (int i = 0; i < count; ++i) {
+ const float value = prediction(i);
+
+ // Only add it if it beats the threshold and has a chance at being in
+ // the top N.
+ if (value < threshold) {
+ continue;
+ }
+
+ top_result_pq.push(std::pair<float, int>(value, i));
+
+ // If at capacity, kick the smallest value out.
+ if (top_result_pq.size() > num_results) {
+ top_result_pq.pop();
+ }
+ }
+
+ // Copy to output vector and reverse into descending order.
+ while (!top_result_pq.empty()) {
+ top_results->push_back(top_result_pq.top());
+ top_result_pq.pop();
+ }
+ std::reverse(top_results->begin(), top_results->end());
+}
+
+static std::string ClassifyImage(const RGBA* const bitmap_src,
+ const int in_stride,
+ const int width, const int height) {
+ // Create input tensor
+ tensorflow::Tensor input_tensor(
+ tensorflow::DT_FLOAT,
+ tensorflow::TensorShape({
+ 1, g_tensorflow_input_size, g_tensorflow_input_size, 3}));
+
+ auto input_tensor_mapped = input_tensor.tensor<float, 4>();
+
+ LOG(INFO) << "Tensorflow: Copying Data.";
+ for (int i = 0; i < g_tensorflow_input_size; ++i) {
+    // Note: in_stride is currently unused; rows are assumed tightly packed
+    // at g_tensorflow_input_size pixels.
+    const RGBA* src = bitmap_src + i * g_tensorflow_input_size;
+ for (int j = 0; j < g_tensorflow_input_size; ++j) {
+ // Copy 3 values
+ input_tensor_mapped(0, i, j, 0) =
+ static_cast<float>(src->red) - g_image_mean;
+ input_tensor_mapped(0, i, j, 1) =
+ static_cast<float>(src->green) - g_image_mean;
+ input_tensor_mapped(0, i, j, 2) =
+ static_cast<float>(src->blue) - g_image_mean;
+ ++src;
+ }
+ }
+
+ std::vector<std::pair<std::string, tensorflow::Tensor> > input_tensors(
+ {{"input:0", input_tensor}});
+
+ VLOG(0) << "Start computing.";
+ std::vector<tensorflow::Tensor> output_tensors;
+ std::vector<std::string> output_names({"output:0"});
+
+ tensorflow::Status s =
+ session->Run(input_tensors, output_names, {}, &output_tensors);
+ VLOG(0) << "End computing.";
+
+ if (!s.ok()) {
+ LOG(ERROR) << "Error during inference: " << s;
+ return "";
+ }
+
+ VLOG(0) << "Reading from layer " << output_names[0];
+ tensorflow::Tensor* output = &output_tensors[0];
+ const int kNumResults = 5;
+ const float kThreshold = 0.1f;
+ std::vector<std::pair<float, int> > top_results;
+ GetTopN(output->flat<float>(), kNumResults, kThreshold, &top_results);
+
+ std::stringstream ss;
+ ss.precision(3);
+ for (const auto& result : top_results) {
+ const float confidence = result.first;
+ const int index = result.second;
+
+ ss << index << " " << confidence << " ";
+
+ // Write out the result as a string
+ if (index < g_label_strings.size()) {
+      // Just for safety: theoretically, the output is under 1000 unless there
+      // are numerical issues leading to a wrong prediction.
+ ss << g_label_strings[index];
+ } else {
+ ss << "Prediction: " << index;
+ }
+
+ ss << "\n";
+ }
+
+ LOG(INFO) << "Predictions: " << ss.str();
+ return ss.str();
+}
+
+JNIEXPORT jstring JNICALL
+TENSORFLOW_METHOD(classifyImageRgb)(
+ JNIEnv* env, jobject thiz, jintArray image, jint width, jint height) {
+  // Copy the image pixels out of the Java array.
+ jboolean iCopied = JNI_FALSE;
+ jint* pixels = env->GetIntArrayElements(image, &iCopied);
+
+ std::string result = ClassifyImage(
+ reinterpret_cast<const RGBA*>(pixels), width * 4, width, height);
+
+ env->ReleaseIntArrayElements(image, pixels, JNI_ABORT);
+
+ return env->NewStringUTF(result.c_str());
+}
+
+JNIEXPORT jstring JNICALL
+TENSORFLOW_METHOD(classifyImageBmp)(
+ JNIEnv* env, jobject thiz, jobject bitmap) {
+ // Obtains the bitmap information.
+ AndroidBitmapInfo info;
+ CHECK_EQ(AndroidBitmap_getInfo(env, bitmap, &info),
+ ANDROID_BITMAP_RESULT_SUCCESS);
+ void* pixels;
+ CHECK_EQ(AndroidBitmap_lockPixels(env, bitmap, &pixels),
+ ANDROID_BITMAP_RESULT_SUCCESS);
+ LOG(INFO) << "Height: " << info.height;
+ LOG(INFO) << "Width: " << info.width;
+ LOG(INFO) << "Stride: " << info.stride;
+ // TODO(jiayq): deal with other formats if necessary.
+ if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
+ return env->NewStringUTF(
+ "Error: Android system is not using RGBA_8888 in default.");
+ }
+
+ std::string result = ClassifyImage(
+ static_cast<const RGBA*>(pixels), info.stride, info.width, info.height);
+
+ // Finally, unlock the pixels
+ CHECK_EQ(AndroidBitmap_unlockPixels(env, bitmap),
+ ANDROID_BITMAP_RESULT_SUCCESS);
+
+ return env->NewStringUTF(result.c_str());
+}
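GetTopN is a single pass with a size-bounded min-heap: every score above the threshold is pushed, and once the heap holds more than N entries the smallest is evicted, giving the top N in O(count log N). The same technique as a standalone sketch, free of the Eigen tensor types used above:

```cpp
#include <algorithm>
#include <cstdio>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

// Returns the (value, index) pairs of the `n` largest entries of `scores`
// that are >= `threshold`, sorted by value in descending order.
std::vector<std::pair<float, int>> TopN(const std::vector<float>& scores,
                                        size_t n, float threshold) {
  // Min-heap: the smallest retained value sits on top, ready for eviction.
  std::priority_queue<std::pair<float, int>,
                      std::vector<std::pair<float, int>>,
                      std::greater<std::pair<float, int>>> pq;
  for (int i = 0; i < static_cast<int>(scores.size()); ++i) {
    if (scores[i] < threshold) continue;
    pq.push({scores[i], i});
    if (pq.size() > n) pq.pop();  // Evict the current minimum.
  }
  std::vector<std::pair<float, int>> results;
  while (!pq.empty()) {
    results.push_back(pq.top());
    pq.pop();
  }
  std::reverse(results.begin(), results.end());  // Ascending -> descending.
  return results;
}

int main() {
  for (const auto& r : TopN({0.05f, 0.7f, 0.2f, 0.9f, 0.15f}, 3, 0.1f)) {
    std::printf("index %d: %.2f\n", r.second, r.first);
  }
  // Prints indices 3, 1, 2 with values 0.90, 0.70, 0.20.
  return 0;
}
```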
diff --git a/tensorflow/examples/android/jni/tensorflow_jni.h b/tensorflow/examples/android/jni/tensorflow_jni.h
new file mode 100644
index 0000000000..2de353bac8
--- /dev/null
+++ b/tensorflow/examples/android/jni/tensorflow_jni.h
@@ -0,0 +1,36 @@
+// The methods are exposed to Java to allow for interaction with the native
+// Tensorflow code. See
+// tensorflow/examples/android/src/org/tensorflow/demo/TensorflowClassifier.java
+// for the Java counterparts.
+
+#ifndef ORG_TENSORFLOW_JNI_TENSORFLOW_JNI_H_ // NOLINT
+#define ORG_TENSORFLOW_JNI_TENSORFLOW_JNI_H_ // NOLINT
+
+#include <jni.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+#define TENSORFLOW_METHOD(METHOD_NAME) \
+ Java_org_tensorflow_demo_TensorflowClassifier_##METHOD_NAME // NOLINT
+
+JNIEXPORT jint JNICALL
+TENSORFLOW_METHOD(initializeTensorflow)(
+ JNIEnv* env, jobject thiz, jobject java_asset_manager,
+ jstring model, jstring labels,
+ jint num_classes, jint mognet_input_size, jint image_mean);
+
+JNIEXPORT jstring JNICALL
+TENSORFLOW_METHOD(classifyImageBmp)(
+ JNIEnv* env, jobject thiz, jobject bitmap);
+
+JNIEXPORT jstring JNICALL
+TENSORFLOW_METHOD(classifyImageRgb)(
+ JNIEnv* env, jobject thiz, jintArray image, jint width, jint height);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // ORG_TENSORFLOW_JNI_TENSORFLOW_JNI_H_ // NOLINT
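The TENSORFLOW_METHOD macro encodes JNI's name-mangling convention: a native method resolves to a symbol named Java_&lt;package&gt;_&lt;Class&gt;_&lt;method&gt; with dots replaced by underscores, so the macro pins the package (org.tensorflow.demo) and class (TensorflowClassifier) in one place. An illustration of the expansion; the Java declaration shown is an assumed sketch, not part of this change:

```cpp
// JNI resolves a native method to a symbol named
//   Java_<package with '.' -> '_'>_<Class>_<method>
// so TENSORFLOW_METHOD(initializeTensorflow) expands to
//   Java_org_tensorflow_demo_TensorflowClassifier_initializeTensorflow
//
// On the Java side this pairs with a declaration along these lines
// (hypothetical sketch):
//
//   package org.tensorflow.demo;
//   public class TensorflowClassifier {
//     public native int initializeTensorflow(
//         AssetManager assetManager, String model, String labels,
//         int numClasses, int mognetInputSize, int imageMean);
//   }
```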
diff --git a/tensorflow/examples/android/jni/yuv2rgb.cc b/tensorflow/examples/android/jni/yuv2rgb.cc
new file mode 100644
index 0000000000..93694e492d
--- /dev/null
+++ b/tensorflow/examples/android/jni/yuv2rgb.cc
@@ -0,0 +1,161 @@
+// This is a collection of routines which convert various YUV image formats
+// to ARGB.
+
+#include "tensorflow/examples/android/jni/yuv2rgb.h"
+
+#ifndef MAX
+#define MAX(a, b) ({__typeof__(a) _a = (a); __typeof__(b) _b = (b); _a > _b ? _a : _b; })
+#define MIN(a, b) ({__typeof__(a) _a = (a); __typeof__(b) _b = (b); _a < _b ? _a : _b; })
+#endif
+
+// This value is 2 ^ 18 - 1, and is used to clamp the RGB values before their ranges
+// are normalized to eight bits.
+static const int kMaxChannelValue = 262143;
+
+// Accepts a YUV 4:2:0 image with a plane of 8 bit Y samples followed by an
+// interleaved U/V plane containing 8 bit 2x2 subsampled chroma samples,
+// except the interleave order of U and V is reversed. Converts to a packed
+// 32-bit ARGB output of the same pixel dimensions.
+void ConvertYUV420SPToARGB8888(const uint8* const yData,
+ const uint8* const uvData,
+ uint32* const output, const int width,
+ const int height) {
+ const uint8* pY = yData;
+ const uint8* pUV = uvData;
+ uint32* out = output;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ int nY = *pY++;
+ int offset = (y >> 1) * width + 2 * (x >> 1);
+#ifdef __APPLE__
+ int nU = pUV[offset];
+ int nV = pUV[offset + 1];
+#else
+ int nV = pUV[offset];
+ int nU = pUV[offset + 1];
+#endif
+
+ nY -= 16;
+ nU -= 128;
+ nV -= 128;
+ if (nY < 0) nY = 0;
+
+ // This is the floating point equivalent. We do the conversion in integer
+ // because some Android devices do not have floating point in hardware.
+      // nR = (int)(1.164 * nY + 1.596 * nV);
+      // nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
+      // nB = (int)(1.164 * nY + 2.018 * nU);
+
+ int nR = (int)(1192 * nY + 1634 * nV);
+ int nG = (int)(1192 * nY - 833 * nV - 400 * nU);
+ int nB = (int)(1192 * nY + 2066 * nU);
+
+ nR = MIN(kMaxChannelValue, MAX(0, nR));
+ nG = MIN(kMaxChannelValue, MAX(0, nG));
+ nB = MIN(kMaxChannelValue, MAX(0, nB));
+
+ nR = (nR >> 10) & 0xff;
+ nG = (nG >> 10) & 0xff;
+ nB = (nB >> 10) & 0xff;
+ *out++ = 0xff000000 | (nR << 16) | (nG << 8) | nB;
+ }
+ }
+}
+
+// The same as above, but downsamples each dimension to half size.
+void ConvertYUV420SPToARGB8888HalfSize(const uint8* const input,
+ uint32* const output,
+ int width, int height) {
+ const uint8* pY = input;
+ const uint8* pUV = input + (width * height);
+ uint32* out = output;
+ int stride = width;
+ width >>= 1;
+ height >>= 1;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ int nY = (pY[0] + pY[1] + pY[stride] + pY[stride + 1]) >> 2;
+ pY += 2;
+#ifdef __APPLE__
+ int nU = *pUV++;
+ int nV = *pUV++;
+#else
+ int nV = *pUV++;
+ int nU = *pUV++;
+#endif
+
+ nY -= 16;
+ nU -= 128;
+ nV -= 128;
+ if (nY < 0) nY = 0;
+
+ int nR = (int)(1192 * nY + 1634 * nV);
+ int nG = (int)(1192 * nY - 833 * nV - 400 * nU);
+ int nB = (int)(1192 * nY + 2066 * nU);
+
+ nR = MIN(kMaxChannelValue, MAX(0, nR));
+ nG = MIN(kMaxChannelValue, MAX(0, nG));
+ nB = MIN(kMaxChannelValue, MAX(0, nB));
+
+ nR = (nR >> 10) & 0xff;
+ nG = (nG >> 10) & 0xff;
+ nB = (nB >> 10) & 0xff;
+ *out++ = 0xff000000 | (nR << 16) | (nG << 8) | nB;
+ }
+ pY += stride;
+ }
+}
+
+// Accepts a YUV 4:2:0 image with a plane of 8 bit Y samples followed by an
+// interleaved U/V plane containing 8 bit 2x2 subsampled chroma samples,
+// except the interleave order of U and V is reversed. Converts to a packed
+// 16-bit RGB565 output of the same pixel dimensions.
+void ConvertYUV420SPToRGB565(const uint8* const input, uint16* const output,
+ const int width, const int height) {
+ const uint8* pY = input;
+ const uint8* pUV = input + (width * height);
+ uint16 *out = output;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ int nY = *pY++;
+ int offset = (y >> 1) * width + 2 * (x >> 1);
+#ifdef __APPLE__
+ int nU = pUV[offset];
+ int nV = pUV[offset + 1];
+#else
+ int nV = pUV[offset];
+ int nU = pUV[offset + 1];
+#endif
+
+ nY -= 16;
+ nU -= 128;
+ nV -= 128;
+ if (nY < 0) nY = 0;
+
+ // This is the floating point equivalent. We do the conversion in integer
+ // because some Android devices do not have floating point in hardware.
+      // nR = (int)(1.164 * nY + 1.596 * nV);
+      // nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
+      // nB = (int)(1.164 * nY + 2.018 * nU);
+
+ int nR = (int)(1192 * nY + 1634 * nV);
+ int nG = (int)(1192 * nY - 833 * nV - 400 * nU);
+ int nB = (int)(1192 * nY + 2066 * nU);
+
+ nR = MIN(kMaxChannelValue, MAX(0, nR));
+ nG = MIN(kMaxChannelValue, MAX(0, nG));
+ nB = MIN(kMaxChannelValue, MAX(0, nB));
+
+ // Shift more than for ARGB8888 and apply appropriate bitmask.
+ nR = (nR >> 13) & 0x1f;
+ nG = (nG >> 12) & 0x3f;
+ nB = (nB >> 13) & 0x1f;
+
+ // R is high 5 bits, G is middle 6 bits, and B is low 5 bits.
+ *out++ = (nR << 11) | (nG << 5) | nB;
+ }
+ }
+}
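All three converters above use the same fixed-point scheme: the BT.601 coefficients are scaled by 1024 (2^10), accumulated in integers, clamped to kMaxChannelValue = (256 << 10) - 1, and shifted right by 10. A short sketch verifying that the integer coefficients really are the rounded scaled floats:

```cpp
#include <cassert>
#include <cmath>

int main() {
  // The integer coefficients used above are round(coefficient * 1024),
  // which is why results are shifted right by 10 after clamping.
  assert(std::lround(1.164 * 1024) == 1192);
  assert(std::lround(1.596 * 1024) == 1634);
  assert(std::lround(0.813 * 1024) == 833);
  assert(std::lround(0.391 * 1024) == 400);
  assert(std::lround(2.018 * 1024) == 2066);

  // kMaxChannelValue == (256 << 10) - 1, so clamping before the shift
  // keeps every 8-bit channel in 0..255.
  assert(262143 == (256 << 10) - 1);
  assert(((262143 >> 10) & 0xff) == 0xff);
  return 0;
}
```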
diff --git a/tensorflow/examples/android/jni/yuv2rgb.h b/tensorflow/examples/android/jni/yuv2rgb.h
new file mode 100644
index 0000000000..698da415f5
--- /dev/null
+++ b/tensorflow/examples/android/jni/yuv2rgb.h
@@ -0,0 +1,37 @@
+// This is a collection of routines which convert various YUV image formats
+// to (A)RGB.
+
+#ifndef ORG_TENSORFLOW_JNI_IMAGEUTILS_YUV2RGB_H_
+#define ORG_TENSORFLOW_JNI_IMAGEUTILS_YUV2RGB_H_
+
+#include "tensorflow/core/platform/port.h"
+
+using namespace tensorflow;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Converts YUV420 semi-planar data to ARGB 8888 data using the supplied width
+// and height. The input and output must already be allocated and non-null.
+// For efficiency, no error checking is performed.
+void ConvertYUV420SPToARGB8888(const uint8* const pY, const uint8* const pUV,
+ uint32* const output, const int width,
+ const int height);
+
+// The same as above, but downsamples each dimension to half size.
+void ConvertYUV420SPToARGB8888HalfSize(const uint8* const input,
+ uint32* const output,
+ int width, int height);
+
+// Converts YUV420 semi-planar data to RGB 565 data using the supplied width
+// and height. The input and output must already be allocated and non-null.
+// For efficiency, no error checking is performed.
+void ConvertYUV420SPToRGB565(const uint8* const input, uint16* const output,
+ const int width, const int height);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ORG_TENSORFLOW_JNI_IMAGEUTILS_YUV2RGB_H_