aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/contrib/lite/kernels/activations.cc
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2018-07-20 16:23:13 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-07-20 16:25:58 -0700
commit41781bad97698c29cd74203cef465d2adb2f04e8 (patch)
tree334a945f0b3f1bc11330fe7c32ec2e9b38afa750 /tensorflow/contrib/lite/kernels/activations.cc
parentbf020cb3160345a30f0551ffbd6c507e33753a1e (diff)
Add support for computing Softmax activation over tensors of rank 1.
PiperOrigin-RevId: 205470922
Diffstat (limited to 'tensorflow/contrib/lite/kernels/activations.cc')
-rw-r--r--tensorflow/contrib/lite/kernels/activations.cc53
1 file changed, 42 insertions, 11 deletions
diff --git a/tensorflow/contrib/lite/kernels/activations.cc b/tensorflow/contrib/lite/kernels/activations.cc
index 99f81c4a8a..d5ac2a7814 100644
--- a/tensorflow/contrib/lite/kernels/activations.cc
+++ b/tensorflow/contrib/lite/kernels/activations.cc
@@ -186,8 +186,8 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_EQ(context, input->type, output->type);
- TF_LITE_ENSURE(context,
- NumDimensions(input) == 2 || NumDimensions(input) == 4);
+ const int num_dims = NumDimensions(input);
+ TF_LITE_ENSURE(context, num_dims == 1 || num_dims == 2 || num_dims == 4);
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
@@ -365,13 +365,9 @@ TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
-// Takes a 2D tensor and perform softmax along the second dimension.
-void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
- TfLiteSoftmaxParams* params) {
- const int batch_size = input->dims->data[0];
- const int input_size = input->dims->data[1];
- float* in = input->data.f;
- float* out = output->data.f;
+// Performs softmax along the input of size (input_size * batch_size).
+void Softmax(const float* in, const int input_size, const int batch_size,
+ const float beta, float* out) {
TF_LITE_ASSERT(input_size > 0);
// For each batch
@@ -385,7 +381,7 @@ void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
// Compute the normalized sum of exps.
float exp_sum = 0.0;
for (int i = 0; i < input_size; i++) {
- out[i] = std::exp((in[i] - max_coeff) * params->beta);
+ out[i] = std::exp((in[i] - max_coeff) * beta);
exp_sum += out[i];
}
@@ -401,6 +397,33 @@ void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
}
}
+// Takes a 1D tensor and performs softmax along it.
+void Softmax1DFloat(const TfLiteTensor* input, TfLiteTensor* output,
+ TfLiteSoftmaxParams* params) {
+ const int input_size = input->dims->data[0];
+ Softmax(input->data.f, input_size, 1, params->beta, output->data.f);
+}
+
+// Takes a 2D tensor and perform softmax along the last dimension.
+void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
+ TfLiteSoftmaxParams* params) {
+ const int batch_size = input->dims->data[0];
+ const int input_size = input->dims->data[1];
+ Softmax(input->data.f, input_size, batch_size, params->beta, output->data.f);
+}
+
+void Softmax1DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
+ TfLiteSoftmaxParams* params, OpData* data) {
+ // TODO(ahentz): this is arguably a dirty trick. Since the implementation
+ // always traverses the last dimension of a 4D tensor, we will pretend our 1D
+ // tensor is 4D in a special way. We will convert a (Y) shape into a (1,
+ // 1, 1, Y) shape.
+ const int input_size = input->dims->data[0];
+ optimized_ops::Softmax(
+ GetTensorData<uint8_t>(input), GetTensorShape({1, 1, 1, input_size}),
+ data->input_multiplier, data->input_left_shift, data->diff_min,
+ GetTensorData<uint8_t>(output), GetTensorShape({1, 1, 1, input_size}));
+}
void Softmax2DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
TfLiteSoftmaxParams* params, OpData* data) {
// TODO(ahentz): this is arguably a dirty trick. Since the implementation
@@ -443,6 +466,10 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
// dimensions.
switch (input->type) {
case kTfLiteFloat32: {
+ if (NumDimensions(input) == 1) {
+ Softmax1DFloat(input, output, params);
+ return kTfLiteOk;
+ }
if (NumDimensions(input) == 2) {
Softmax2DFloat(input, output, params);
return kTfLiteOk;
@@ -452,11 +479,15 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
context->ReportError(
- context, "Only 2D and 4D tensors supported currently, got %dD.",
+ context, "Only 1D, 2D and 4D tensors supported currently, got %dD.",
NumDimensions(input));
return kTfLiteError;
}
case kTfLiteUInt8: {
+ if (NumDimensions(input) == 1) {
+ Softmax1DQuantized(input, output, params, data);
+ return kTfLiteOk;
+ }
if (NumDimensions(input) == 2) {
Softmax2DQuantized(input, output, params, data);
return kTfLiteOk;