path: root/tensorflow/contrib/lite/kernels/fully_connected.cc
author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-05-22 15:24:01 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-05-22 15:26:45 -0700
commit    17272b4d1ccb5c7bd0bc3015c34f8bd769516354 (patch)
tree      a607417c9ca19c8a94bdac11f7bde5ecd35e8e4e /tensorflow/contrib/lite/kernels/fully_connected.cc
parent    56502dc77e7ead9c9a4f63bf3405a937307a6f37 (diff)
Adds a kernel that checks whether a vector is zero or not.
PiperOrigin-RevId: 197633182
Diffstat (limited to 'tensorflow/contrib/lite/kernels/fully_connected.cc')
-rw-r--r--  tensorflow/contrib/lite/kernels/fully_connected.cc  8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index 3374923e6e..1b942a1910 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -101,6 +101,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     input_size *= input->dims->data[i];
   }
+  TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2);
   const int batch_size = input_size / filter->dims->data[1];
   const int num_units = filter->dims->data[0];
@@ -109,8 +110,6 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0));
   }
-  TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 2);
-
   // Note that quantized inference requires that all tensors have their
   // parameters set. This is usually done during quantized training.
   TfLiteType data_type = input->type;
@@ -218,11 +217,8 @@ TfLiteStatus EvalPieQuantized(TfLiteContext* context, TfLiteNode* node,
     tensor_utils::ZeroVector(output->data.f, batch_size * num_units);
   }
-  // TODO(mirkov): change std::minmax_element with a vectorized call.
-  auto minmax_element =
-      std::minmax_element(input->data.f, input->data.f + total_input_size);
   // Save matrix multiplication computation for all zero input.
-  if (*minmax_element.first == 0.0 && *minmax_element.second == 0.0) {
+  if (tensor_utils::IsZeroVector(input->data.f, total_input_size)) {
     tensor_utils::ApplyActivationToVector(output->data.f,
                                           batch_size * num_units,
                                           params->activation, output->data.f);
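
For context, both the removed std::minmax_element scan and the new call decide the same thing: whether the input batch is entirely zero, so the matrix multiplication can be skipped. Below is a minimal, illustrative sketch of such a zero-vector check. The name IsZeroVectorSketch and the plain scalar loop are assumptions for illustration only; the actual tensor_utils::IsZeroVector is a separate kernel that may use a vectorized (e.g. NEON) path.

// Illustrative sketch only, not the TensorFlow Lite implementation.
// Returns true only if every element of `vector` is exactly 0.0f, so the
// caller can skip the matrix multiplication for an all-zero input.
#include <cstddef>

bool IsZeroVectorSketch(const float* vector, std::size_t v_size) {
  for (std::size_t i = 0; i < v_size; ++i) {
    if (vector[i] != 0.0f) return false;
  }
  return true;
}

Unlike the removed minmax scan, an early-exit check like this can stop at the first non-zero element, and routing it through tensor_utils lets the check be backed by a vectorized kernel, which is what the removed TODO asked for.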