Diffstat (limited to 'tensorflow/contrib/lite')
-rw-r--r--  tensorflow/contrib/lite/Makefile | 3
-rwxr-xr-x  tensorflow/contrib/lite/download_dependencies.sh | 6
-rw-r--r--  tensorflow/contrib/lite/examples/ios/camera/tflite_camera_example.xcodeproj/project.pbxproj | 8
-rw-r--r--  tensorflow/contrib/lite/g3doc/apis.md | 2
-rw-r--r--  tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/Camera2BasicFragment.java | 23
-rw-r--r--  tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/ImageClassifier.java | 10
-rw-r--r--  tensorflow/contrib/lite/java/demo/app/src/main/res/layout/fragment_camera2_basic.xml | 41
-rw-r--r--  tensorflow/contrib/lite/java/demo/app/src/main/res/values/strings.xml | 2
-rw-r--r--  tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java | 7
-rw-r--r--  tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java | 6
-rw-r--r--  tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc | 10
-rw-r--r--  tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h | 12
-rw-r--r--  tensorflow/contrib/lite/kernels/add.cc | 2
-rw-r--r--  tensorflow/contrib/lite/kernels/div.cc | 5
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h | 2
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h | 39
-rw-r--r--  tensorflow/contrib/lite/kernels/sub.cc | 3
-rw-r--r--  tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_merge.cc | 2
-rw-r--r--  tensorflow/contrib/lite/toco/model.h | 6
19 files changed, 154 insertions, 35 deletions
diff --git a/tensorflow/contrib/lite/Makefile b/tensorflow/contrib/lite/Makefile
index b4504f246a..65fba52d46 100644
--- a/tensorflow/contrib/lite/Makefile
+++ b/tensorflow/contrib/lite/Makefile
@@ -90,7 +90,8 @@ $(wildcard tensorflow/contrib/lite/kernels/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/internal/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/internal/optimized/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/internal/reference/*.c) \
-$(wildcard tensorflow/contrib/lite/downloads/farmhash/src/farmhash.cc)
+$(wildcard tensorflow/contrib/lite/downloads/farmhash/src/farmhash.cc) \
+$(wildcard tensorflow/contrib/lite/downloads/fft2d/fftsg.c)
# Remove any duplicates.
CORE_CC_ALL_SRCS := $(sort $(CORE_CC_ALL_SRCS))
CORE_CC_EXCLUDE_SRCS := \
diff --git a/tensorflow/contrib/lite/download_dependencies.sh b/tensorflow/contrib/lite/download_dependencies.sh
index a93ed201d6..436c3e1d4c 100755
--- a/tensorflow/contrib/lite/download_dependencies.sh
+++ b/tensorflow/contrib/lite/download_dependencies.sh
@@ -30,12 +30,15 @@ if [ ! -f $BZL_FILE_PATH ]; then
fi
EIGEN_URL="$(grep -o 'http.*bitbucket.org/eigen/eigen/get/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v mirror.bazel | head -n1)"
-GEMMLOWP_URL="$(grep -o 'https://mirror.bazel.build/github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)"
+# TODO (yongtang): Replace the following with 'https://mirror.bazel.build/github.com/google/gemmlowp/.*zip' once
+# the archive has been propagated in mirror.bazel.build.
+GEMMLOWP_URL="$(grep -o 'https://github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)"
GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz"
ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)"
NEON_2_SSE_URL="https://github.com/intel/ARM_NEON_2_x86_SSE/archive/master.zip"
FARMHASH_URL="https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz"
FLATBUFFERS_URL="https://github.com/google/flatbuffers/archive/master.zip"
+FFT2D_URL="https://mirror.bazel.build/www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz"
# TODO(petewarden): Some new code in Eigen triggers a clang bug with iOS arm64,
# so work around it by patching the source.
@@ -91,6 +94,7 @@ download_and_extract "${ABSL_URL}" "${DOWNLOADS_DIR}/absl"
download_and_extract "${NEON_2_SSE_URL}" "${DOWNLOADS_DIR}/neon_2_sse"
download_and_extract "${FARMHASH_URL}" "${DOWNLOADS_DIR}/farmhash"
download_and_extract "${FLATBUFFERS_URL}" "${DOWNLOADS_DIR}/flatbuffers"
+download_and_extract "${FFT2D_URL}" "${DOWNLOADS_DIR}/fft2d"
replace_by_sed 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
"${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
diff --git a/tensorflow/contrib/lite/examples/ios/camera/tflite_camera_example.xcodeproj/project.pbxproj b/tensorflow/contrib/lite/examples/ios/camera/tflite_camera_example.xcodeproj/project.pbxproj
index b0236e9c60..98d3b5bb8a 100644
--- a/tensorflow/contrib/lite/examples/ios/camera/tflite_camera_example.xcodeproj/project.pbxproj
+++ b/tensorflow/contrib/lite/examples/ios/camera/tflite_camera_example.xcodeproj/project.pbxproj
@@ -326,10 +326,6 @@
GCC_WARN_UNUSED_VARIABLE = YES;
HEADER_SEARCH_PATHS = (
"$(inherited)",
- ../../../../../../,
- ../../../downloads/flatbuffers/include/,
- ../../../downloads/eigen/,
- ../../../downloads/,
);
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
MTL_ENABLE_DEBUG_INFO = YES;
@@ -373,10 +369,6 @@
GCC_WARN_UNUSED_VARIABLE = YES;
HEADER_SEARCH_PATHS = (
"$(inherited)",
- ../../../../../../,
- ../../../downloads/flatbuffers/include/,
- ../../../downloads/eigen/,
- ../../../downloads/,
);
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
MTL_ENABLE_DEBUG_INFO = NO;
diff --git a/tensorflow/contrib/lite/g3doc/apis.md b/tensorflow/contrib/lite/g3doc/apis.md
index fe208e47d1..50cc146a87 100644
--- a/tensorflow/contrib/lite/g3doc/apis.md
+++ b/tensorflow/contrib/lite/g3doc/apis.md
@@ -29,7 +29,7 @@ interpreter->AllocateTensors();
float* input = interpreter->typed_input_tensor<float>(0);
// Fill `input`.
interpreter->Invoke();
-float* output = interpreter->type_output_tensor<float>(0);
+float* output = interpreter->typed_output_tensor<float>(0);
```
### Data Alignment
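For orientation, the same load/fill/invoke/read flow exists in the Java API touched later in this change. A minimal sketch is below; the model file name and the tensor shapes are hypothetical placeholders, not values taken from this patch.

```java
import java.io.File;
import org.tensorflow.lite.Interpreter;

// Sketch only: run one inference with the TensorFlow Lite Java API.
// "mobilenet.tflite" and the float[1][1001] output shape are assumptions
// chosen for illustration.
public final class RunModelSketch {
  public static void main(String[] args) {
    // Interpreter implements AutoCloseable, so try-with-resources releases it.
    try (Interpreter interpreter = new Interpreter(new File("mobilenet.tflite"))) {
      float[][][][] input = new float[1][224][224][3]; // fill with image data
      float[][] output = new float[1][1001];
      interpreter.run(input, output);                  // analogous to Invoke() in C++
    }
  }
}
```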
diff --git a/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/Camera2BasicFragment.java b/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/Camera2BasicFragment.java
index 300786c3ca..18f6465188 100644
--- a/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/Camera2BasicFragment.java
+++ b/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/Camera2BasicFragment.java
@@ -54,6 +54,9 @@ import android.view.Surface;
import android.view.TextureView;
import android.view.View;
import android.view.ViewGroup;
+import android.widget.CompoundButton;
+import android.widget.NumberPicker;
+import android.widget.ToggleButton;
import android.widget.TextView;
import android.widget.Toast;
import java.io.IOException;
@@ -82,6 +85,8 @@ public class Camera2BasicFragment extends Fragment
private boolean runClassifier = false;
private boolean checkedPermissions = false;
private TextView textView;
+ private ToggleButton toggle;
+ private NumberPicker np;
private ImageClassifier classifier;
/** Max preview width that is guaranteed by Camera2 API */
@@ -289,6 +294,24 @@ public class Camera2BasicFragment extends Fragment
public void onViewCreated(final View view, Bundle savedInstanceState) {
textureView = (AutoFitTextureView) view.findViewById(R.id.texture);
textView = (TextView) view.findViewById(R.id.text);
+ toggle = (ToggleButton) view.findViewById(R.id.button);
+
+ toggle.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
+ public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
+ classifier.setUseNNAPI(isChecked);
+ }
+ });
+
+ np = (NumberPicker) view.findViewById(R.id.np);
+ np.setMinValue(1);
+ np.setMaxValue(10);
+ np.setWrapSelectorWheel(true);
+ np.setOnValueChangedListener(new NumberPicker.OnValueChangeListener() {
+ @Override
+ public void onValueChange(NumberPicker picker, int oldVal, int newVal){
+ classifier.setNumThreads(newVal);
+ }
+ });
}
/** Load the model and labels. */
diff --git a/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/ImageClassifier.java b/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/ImageClassifier.java
index c57bb348c5..d32c077910 100644
--- a/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/ImageClassifier.java
+++ b/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/ImageClassifier.java
@@ -142,6 +142,16 @@ public abstract class ImageClassifier {
}
}
+ public void setUseNNAPI(Boolean nnapi) {
+ if (tflite != null)
+ tflite.setUseNNAPI(nnapi);
+ }
+
+ public void setNumThreads(int num_threads) {
+ if (tflite != null)
+ tflite.setNumThreads(num_threads);
+ }
+
/** Closes tflite to release resources. */
public void close() {
tflite.close();
diff --git a/tensorflow/contrib/lite/java/demo/app/src/main/res/layout/fragment_camera2_basic.xml b/tensorflow/contrib/lite/java/demo/app/src/main/res/layout/fragment_camera2_basic.xml
index 15305c436e..db557ad62f 100644
--- a/tensorflow/contrib/lite/java/demo/app/src/main/res/layout/fragment_camera2_basic.xml
+++ b/tensorflow/contrib/lite/java/demo/app/src/main/res/layout/fragment_camera2_basic.xml
@@ -22,24 +22,59 @@
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignParentStart="true"
+ android:layout_alignParentLeft="true"
android:layout_alignParentTop="true" />
<FrameLayout
android:id="@+id/control"
android:layout_width="match_parent"
- android:layout_height="112dp"
+ android:layout_height="135dp"
android:layout_alignParentBottom="true"
android:layout_alignParentStart="true"
+ android:layout_alignParentLeft="true"
+ android:layout_alignParentEnd="true"
+ android:layout_alignParentRight="true"
+ android:layout_marginEnd="150dp"
+ android:layout_marginRight="150dp"
android:background="@color/control_background">
- <TextView android:id="@+id/text"
+ <TextView
+ android:id="@+id/text"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
- android:paddingLeft="80dp"
+ android:paddingLeft="20dp"
android:textColor="#FFF"
android:textSize="20sp"
android:textStyle="bold" />
</FrameLayout>
+ <RelativeLayout
+ android:id="@+id/control2"
+ android:layout_width="match_parent"
+ android:layout_height="135dp"
+ android:layout_alignParentLeft="true"
+ android:layout_alignParentStart="true"
+ android:layout_alignTop="@+id/control"
+ android:layout_marginLeft="300dp"
+ android:layout_marginStart="300dp"
+ android:background="@color/control_background">
+
+ <ToggleButton
+ android:id="@+id/button"
+ android:textOff="@string/tflite"
+ android:textOn="@string/nnapi"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_alignParentLeft="true"
+ android:layout_alignParentStart="true" />
+
+ <NumberPicker
+ android:id="@+id/np"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_below="@+id/button"
+ android:visibility="visible" />
+ </RelativeLayout>
+
</RelativeLayout>
diff --git a/tensorflow/contrib/lite/java/demo/app/src/main/res/values/strings.xml b/tensorflow/contrib/lite/java/demo/app/src/main/res/values/strings.xml
index a08ec3eb62..29a033bcd4 100644
--- a/tensorflow/contrib/lite/java/demo/app/src/main/res/values/strings.xml
+++ b/tensorflow/contrib/lite/java/demo/app/src/main/res/values/strings.xml
@@ -21,4 +21,6 @@
<string name="toggle_turn_on">NN:On</string>
<string name="toggle_turn_off">NN:Off</string>
<string name="toggle">Use NNAPI</string>
+ <string name="tflite">tflite</string>
+ <string name="nnapi">NNAPI</string>
</resources>
diff --git a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java
index e915e65aa1..e84ee71129 100644
--- a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java
+++ b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java
@@ -215,6 +215,13 @@ public final class Interpreter implements AutoCloseable {
}
}
+ public void setNumThreads(int num_threads) {
+ if (wrapper == null) {
+ throw new IllegalStateException("The interpreter has already been closed.");
+ }
+ wrapper.setNumThreads(num_threads);
+ }
+
/** Release resources associated with the {@code Interpreter}. */
@Override
public void close() {
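A hedged sketch of how a caller might combine the new setNumThreads method with the pre-existing NNAPI toggle used by the demo app; the model file name here is a placeholder, not part of this change.

```java
import java.io.File;
import org.tensorflow.lite.Interpreter;

// Sketch only: configure the interpreter before running inference.
public final class InterpreterConfigSketch {
  public static Interpreter build() {
    Interpreter tflite = new Interpreter(new File("model.tflite")); // placeholder path
    tflite.setUseNNAPI(true); // pre-existing toggle, as called from ImageClassifier
    tflite.setNumThreads(4);  // new in this change: cap CPU kernel threads
    return tflite;
  }
}
```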
diff --git a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
index dfc8ac111a..2fc803715b 100644
--- a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
+++ b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
@@ -153,6 +153,10 @@ final class NativeInterpreterWrapper implements AutoCloseable {
useNNAPI(interpreterHandle, useNNAPI);
}
+ void setNumThreads(int num_threads) {
+ numThreads(interpreterHandle, num_threads);
+ }
+
/** Gets index of an input given its name. */
int getInputIndex(String name) {
if (inputsIndexes == null) {
@@ -324,6 +328,8 @@ final class NativeInterpreterWrapper implements AutoCloseable {
private static native void useNNAPI(long interpreterHandle, boolean state);
+ private static native void numThreads(long interpreterHandle, int num_threads);
+
private static native long createErrorReporter(int size);
private static native long createModel(String modelPathOrBuffer, long errorHandle);
diff --git a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc
index ccfdfd829b..45f510da1d 100644
--- a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc
+++ b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc
@@ -320,6 +320,16 @@ Java_org_tensorflow_lite_NativeInterpreterWrapper_useNNAPI(JNIEnv* env,
interpreter->UseNNAPI(static_cast<bool>(state));
}
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_numThreads(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jint num_threads) {
+ tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
+ if (interpreter == nullptr) return;
+ interpreter->SetNumThreads(static_cast<int>(num_threads));
+}
+
JNIEXPORT jlong JNICALL
Java_org_tensorflow_lite_NativeInterpreterWrapper_createErrorReporter(
JNIEnv* env, jclass clazz, jint size) {
diff --git a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h
index 0e28a77fee..eaa765cb34 100644
--- a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h
+++ b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h
@@ -61,7 +61,7 @@ Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputNames(JNIEnv* env,
/*
* Class: org_tensorflow_lite_NativeInterpreterWrapper
* Method:
- * Signature: (JZ)
+ * Signature: (JZ)V
*/
JNIEXPORT void JNICALL
Java_org_tensorflow_lite_NativeInterpreterWrapper_useNNAPI(JNIEnv* env,
@@ -72,6 +72,16 @@ Java_org_tensorflow_lite_NativeInterpreterWrapper_useNNAPI(JNIEnv* env,
/*
* Class: org_tensorflow_lite_NativeInterpreterWrapper
* Method:
+ * Signature: (JI)V
+ */
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_numThreads(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jint num_threads);
+/*
+ * Class: org_tensorflow_lite_NativeInterpreterWrapper
+ * Method:
* Signature: (I)J
*/
JNIEXPORT jlong JNICALL
diff --git a/tensorflow/contrib/lite/kernels/add.cc b/tensorflow/contrib/lite/kernels/add.cc
index 63ea89df56..e0aa070e2d 100644
--- a/tensorflow/contrib/lite/kernels/add.cc
+++ b/tensorflow/contrib/lite/kernels/add.cc
@@ -176,7 +176,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
output);
} else {
context->ReportError(context,
- "Inputs and outputs not all float|unit8 types.");
+ "Inputs and outputs not all float|uint8 types.");
return kTfLiteError;
}
diff --git a/tensorflow/contrib/lite/kernels/div.cc b/tensorflow/contrib/lite/kernels/div.cc
index 6dd243ad62..ec380c8e49 100644
--- a/tensorflow/contrib/lite/kernels/div.cc
+++ b/tensorflow/contrib/lite/kernels/div.cc
@@ -106,6 +106,8 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
#undef TF_LITE_DIV
}
+
+
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
@@ -118,7 +120,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
if (output->type == kTfLiteFloat32) {
EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
} else {
- context->ReportError(context, "Inputs and outputs not all float types.");
+ context->ReportError(context,
+ "Div only supports FLOAT32 and quantized UINT8 now.");
return kTfLiteError;
}
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
index d585bcca0e..9e9aba0169 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
@@ -4374,7 +4374,7 @@ inline void Softmax(const uint8* input_data, const Dims<4>& input_dims,
using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
- gemmlowp::ScopedProfilingLabel label("Softmax/8bit");
+gemmlowp::ScopedProfilingLabel label("Softmax/8bit");
const int batches = MatchingArraySize(input_dims, 3, output_dims, 3);
const int height = MatchingArraySize(input_dims, 2, output_dims, 2);
const int width = MatchingArraySize(input_dims, 1, output_dims, 1);
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index ae295cc8b5..4c8cbe4275 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -1403,6 +1403,33 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
output_data, output_dims);
}
+inline void Div(const float* input1_data, const Dims<4>& input1_dims,
+ const float* input2_data, const Dims<4>& input2_dims,
+ float output_activation_min, float output_activation_max,
+ float* output_data, const Dims<4>& output_dims) {
+ const int batches =
+ MatchingArraySize(input1_dims, 3, input2_dims, 3, output_dims, 3);
+ const int height =
+ MatchingArraySize(input1_dims, 2, input2_dims, 2, output_dims, 2);
+ const int width =
+ MatchingArraySize(input1_dims, 1, input2_dims, 1, output_dims, 1);
+ const int depth =
+ MatchingArraySize(input1_dims, 0, input2_dims, 0, output_dims, 0);
+ for (int b = 0; b < batches; ++b) {
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ for (int c = 0; c < depth; ++c) {
+ output_data[Offset(output_dims, c, x, y, b)] =
+ ActivationFunctionWithMinMax(
+ input1_data[Offset(input1_dims, c, x, y, b)] /
+ input2_data[Offset(input2_dims, c, x, y, b)],
+ output_activation_min, output_activation_max);
+ }
+ }
+ }
+ }
+}
+
// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
@@ -1444,18 +1471,6 @@ void BroadcastDiv(const T* input1_data, const Dims<4>& input1_dims,
}
}
-inline void Div(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
- const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
- for (int i = 0; i < flat_size; ++i) {
- output_data[i] = ActivationFunctionWithMinMax(
- input1_data[i] / input2_data[i], output_activation_min,
- output_activation_max);
- }
-}
-
inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float output_activation_min, float output_activation_max,
diff --git a/tensorflow/contrib/lite/kernels/sub.cc b/tensorflow/contrib/lite/kernels/sub.cc
index 66b06aeaec..7c60a4fdbf 100644
--- a/tensorflow/contrib/lite/kernels/sub.cc
+++ b/tensorflow/contrib/lite/kernels/sub.cc
@@ -174,7 +174,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
- context->ReportError(context, "Inputs and outputs not all float types.");
+ context->ReportError(context,
+ "Inputs and outputs not all float|uint8 types.");
return kTfLiteError;
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_merge.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_merge.cc
index 477e7f13da..38e0005890 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_merge.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_merge.cc
@@ -32,7 +32,7 @@ bool ResolveTensorFlowMerge::Run(Model* model, std::size_t op_index) {
}
// We need to yield until this Merge node has only 1 input, which will mean
- // that that is the selected input. Other graph transformations on other nodes
+ // that is the selected input. Other graph transformations on other nodes
// such as ResolveTensorFlowSwitch, will take care of trimming the
// non-selected inputs, so that at some point there will be only 1 input left.
if (merge_op->inputs.size() > 1) {
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index 705a9d69a6..482cc71d8b 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -152,9 +152,9 @@ enum class AxesOrder {
};
// The type of the scalars in an array.
-// Note that that does not by itself tell whether the values in the array are
-// real (are literally interpreted as real numbers) or quantized (only acquire
-// a meaning as real numbers in conjunction with QuantizationParams).
+// Note that the type does not by itself tell whether the values in the array
+// are real (are literally interpreted as real numbers) or quantized (only
+// acquire a meaning as real numbers in conjunction with QuantizationParams).
//
// In practice though:
// float values are always real