Diffstat (limited to 'tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm')
 tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm | 62 ++++++++++++++-------
 1 file changed, 41 insertions(+), 21 deletions(-)
diff --git a/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm b/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm
index dc79e7a12a..86570b19d2 100644
--- a/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm
+++ b/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm
@@ -22,6 +22,27 @@
#include "tensorflow_utils.h"
+// If you have your own model, modify this to the file name, and make sure
+// you've added the file to your app resources too.
+static NSString* model_file_name = @"tensorflow_inception_graph";
+static NSString* model_file_type = @"pb";
+// This controls whether we'll be loading a plain GraphDef proto, or a
+// file created by the convert_graphdef_memmapped_format utility, which
+// wraps a GraphDef and its weights so they can be memory-mapped from disk
+// to reduce overall memory usage.
+const bool model_uses_memory_mapping = false;
+// If you have your own model, point this to the labels file.
+static NSString* labels_file_name = @"imagenet_comp_graph_label_strings";
+static NSString* labels_file_type = @"txt";
+// These dimensions need to match those the model was trained with.
+const int wanted_input_width = 224;
+const int wanted_input_height = 224;
+const int wanted_input_channels = 3;
+const float input_mean = 117.0f;
+const float input_std = 1.0f;
+const std::string input_layer_name = "input";
+const std::string output_layer_name = "softmax1";
+
static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
@"AVCaptureStillImageIsCapturingStillImageContext";
@@ -269,39 +290,32 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
}
const int image_channels = 4;
- const int wanted_width = 224;
- const int wanted_height = 224;
- const int wanted_channels = 3;
- const float input_mean = 117.0f;
- const float input_std = 1.0f;
- assert(image_channels >= wanted_channels);
+ assert(image_channels >= wanted_input_channels);
tensorflow::Tensor image_tensor(
tensorflow::DT_FLOAT,
tensorflow::TensorShape(
- {1, wanted_height, wanted_width, wanted_channels}));
+ {1, wanted_input_height, wanted_input_width, wanted_input_channels}));
auto image_tensor_mapped = image_tensor.tensor<float, 4>();
tensorflow::uint8 *in = sourceStartAddr;
float *out = image_tensor_mapped.data();
- for (int y = 0; y < wanted_height; ++y) {
- float *out_row = out + (y * wanted_width * wanted_channels);
- for (int x = 0; x < wanted_width; ++x) {
- const int in_x = (y * image_width) / wanted_width;
- const int in_y = (x * image_height) / wanted_height;
+ for (int y = 0; y < wanted_input_height; ++y) {
+ float *out_row = out + (y * wanted_input_width * wanted_input_channels);
+ for (int x = 0; x < wanted_input_width; ++x) {
+ const int in_x = (y * image_width) / wanted_input_width;
+ const int in_y = (x * image_height) / wanted_input_height;
tensorflow::uint8 *in_pixel =
in + (in_y * image_width * image_channels) + (in_x * image_channels);
- float *out_pixel = out_row + (x * wanted_channels);
- for (int c = 0; c < wanted_channels; ++c) {
+ float *out_pixel = out_row + (x * wanted_input_channels);
+ for (int c = 0; c < wanted_input_channels; ++c) {
out_pixel[c] = (in_pixel[c] - input_mean) / input_std;
}
}
}
if (tf_session.get()) {
- std::string input_layer = "input";
- std::string output_layer = "output";
std::vector<tensorflow::Tensor> outputs;
tensorflow::Status run_status = tf_session->Run(
- {{input_layer, image_tensor}}, {output_layer}, {}, &outputs);
+ {{input_layer_name, image_tensor}}, {output_layer_name}, {}, &outputs);
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed:" << run_status;
} else {
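
The sampling loop above does a nearest-neighbor resize combined with per-channel normalization to (value - input_mean) / input_std; since input_std is 1.0f here, this reduces to subtracting the 117.0f mean the Inception graph expects. Note that in_x is computed from y and in_y from x, which transposes the frame, apparently to compensate for the rotated capture buffer. A standalone sketch of the same preprocessing, with illustrative names:

  #include <cstdint>
  #include <vector>

  // Nearest-neighbor resize plus per-channel normalization, mirroring the
  // loop above. The x/y swap transposes the rotated capture buffer into the
  // orientation the model expects (assumes a square destination).
  std::vector<float> Preprocess(const uint8_t* src, int src_width,
                                int src_height, int src_channels,
                                int dst_width, int dst_height,
                                int dst_channels, float mean, float stddev) {
    std::vector<float> dst(dst_width * dst_height * dst_channels);
    for (int y = 0; y < dst_height; ++y) {
      for (int x = 0; x < dst_width; ++x) {
        const int in_x = (y * src_width) / dst_width;    // swapped on purpose
        const int in_y = (x * src_height) / dst_height;
        const uint8_t* in_pixel =
            src + (in_y * src_width * src_channels) + (in_x * src_channels);
        float* out_pixel = &dst[(y * dst_width + x) * dst_channels];
        for (int c = 0; c < dst_channels; ++c) {
          out_pixel[c] = (in_pixel[c] - mean) / stddev;
        }
      }
    }
    return dst;
  }
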
@@ -362,22 +376,28 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
- (void)viewDidLoad {
[super viewDidLoad];
- [self setupAVCapture];
square = [[UIImage imageNamed:@"squarePNG"] retain];
synth = [[AVSpeechSynthesizer alloc] init];
labelLayers = [[NSMutableArray alloc] init];
oldPredictionValues = [[NSMutableDictionary alloc] init];
- tensorflow::Status load_status =
- LoadModel(@"tensorflow_inception_graph", @"pb", &tf_session);
+
+ tensorflow::Status load_status;
+ if (model_uses_memory_mapping) {
+ load_status = LoadMemoryMappedModel(
+ model_file_name, model_file_type, &tf_session, &tf_memmapped_env);
+ } else {
+ load_status = LoadModel(model_file_name, model_file_type, &tf_session);
+ }
if (!load_status.ok()) {
LOG(FATAL) << "Couldn't load model: " << load_status;
}
tensorflow::Status labels_status =
- LoadLabels(@"imagenet_comp_graph_label_strings", @"txt", &labels);
+ LoadLabels(labels_file_name, labels_file_type, &labels);
if (!labels_status.ok()) {
LOG(FATAL) << "Couldn't load labels: " << labels_status;
}
+ [self setupAVCapture];
}
- (void)viewDidUnload {
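
For context, the LoadModel and LoadMemoryMappedModel helpers called in viewDidLoad come from tensorflow_utils.h; their declarations are roughly the following (assumed shape, not verbatim):

  // Assumed declarations from tensorflow_utils.h, shown for reference only.
  tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
                               std::unique_ptr<tensorflow::Session>* session);
  tensorflow::Status LoadMemoryMappedModel(
      NSString* file_name, NSString* file_type,
      std::unique_ptr<tensorflow::Session>* session,
      std::unique_ptr<tensorflow::MemmappedEnv>* memmapped_env);

Moving [self setupAVCapture] to the end of viewDidLoad means capture callbacks cannot fire before the session and labels have finished loading.
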