path: root/tensorflow/contrib/lite/kernels/concatenation.cc
author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-09-27 06:12:59 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-09-27 06:16:42 -0700
commit     abf26356209cba1ba895a06d9ce55ad01dad7fc6 (patch)
tree       5ef1c907a30bf89d08ba241ef985b19938427420 /tensorflow/contrib/lite/kernels/concatenation.cc
parent     19d8963bc0ea64e10ff08ad4e7cc76813a182196 (diff)
Update kernel evals to use new kernel signatures.
PiperOrigin-RevId: 214763814
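
The diff below rewrites the eval macros to the new kernel signature style: per-call arguments (axis, input count, quantization parameters) move into a tflite::ConcatenationParams struct, and tensor shapes are passed as RuntimeShape pointers instead of Dims. As a rough illustration of what the updated TF_LITE_CONCATENATION macro expands to, here is a minimal standalone sketch; the params fields and the (params, input_shapes, input_data, output_shape, output_data) argument order come from the diff itself, while the header paths, RuntimeShape construction, and concrete values are illustrative assumptions, not code from this commit.

#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/contrib/lite/kernels/internal/types.h"

// Sketch: concatenate two 1-D float tensors along axis 0 using the
// new-style kernel signature.
void ConcatFloatSketch() {
  const float in0[] = {1.f, 2.f, 3.f};
  const float in1[] = {4.f, 5.f};
  const tflite::RuntimeShape shape0({3});
  const tflite::RuntimeShape shape1({2});
  const tflite::RuntimeShape* input_shapes[] = {&shape0, &shape1};
  const float* input_data[] = {in0, in1};

  float out[5];
  const tflite::RuntimeShape output_shape({5});

  tflite::ConcatenationParams op_params;
  op_params.axis = 0;          // concatenation axis
  op_params.inputs_count = 2;  // number of input tensors
  tflite::reference_ops::Concatenation(op_params, input_shapes, input_data,
                                       output_shape, out);
  // out now holds {1, 2, 3, 4, 5}.
}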
Diffstat (limited to 'tensorflow/contrib/lite/kernels/concatenation.cc')
-rw-r--r--  tensorflow/contrib/lite/kernels/concatenation.cc | 39
1 file changed, 25 insertions(+), 14 deletions(-)
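
The quantized path changes the same way: TF_LITE_CONCATENATION_QUANTIZED now calls ConcatenationWithScaling, passing per-input zero points and scales through the same params struct. The sketch below mirrors that call; only the field names and argument order are taken from the diff, and the shapes and quantization values are made-up placeholders.

#include <cstdint>

#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/contrib/lite/kernels/internal/types.h"

// Sketch: quantized concatenation of two 1-D uint8 tensors along axis 0.
void ConcatQuantizedSketch() {
  const uint8_t in0[] = {10, 20};
  const uint8_t in1[] = {30, 40, 50};
  const tflite::RuntimeShape shape0({2});
  const tflite::RuntimeShape shape1({3});
  const tflite::RuntimeShape* input_shapes[] = {&shape0, &shape1};
  const uint8_t* input_data[] = {in0, in1};

  // Per-input quantization parameters (hypothetical values).
  const int32_t input_zero_points[] = {128, 128};
  const float input_scales[] = {0.5f, 0.5f};

  uint8_t out[5];
  const tflite::RuntimeShape output_shape({5});

  tflite::ConcatenationParams op_params;
  op_params.axis = 0;
  op_params.inputs_count = 2;
  op_params.input_zeropoint = input_zero_points;
  op_params.input_scale = input_scales;
  op_params.output_zeropoint = 128;  // hypothetical output quantization
  op_params.output_scale = 0.5f;
  tflite::reference_ops::ConcatenationWithScaling(
      op_params, input_shapes, input_data, output_shape, out);
  // With matching input/output scales this reduces to a plain copy.
}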
diff --git a/tensorflow/contrib/lite/kernels/concatenation.cc b/tensorflow/contrib/lite/kernels/concatenation.cc
index 25ea556d5a..7ad3399ffd 100644
--- a/tensorflow/contrib/lite/kernels/concatenation.cc
+++ b/tensorflow/contrib/lite/kernels/concatenation.cc
@@ -100,20 +100,31 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// allocate and populate these during Prepare().
// TODO(ycling): Activation function parameter is ignored. For now we don't have
// a model with a Concatenation with fused activation function.
-#define TF_LITE_CONCATENATION(type, scalar) \
- VectorOfTensors<scalar> all_inputs(*context, *node->inputs); \
- type::Concatenation<FusedActivationFunctionType::kNone, scalar>( \
- RemapDim(NumDimensions(output), axis), all_inputs.data(), \
- all_inputs.dims(), node->inputs->size, GetTensorData<scalar>(output), \
- GetTensorDims(output))
-
-#define TF_LITE_CONCATENATION_QUANTIZED(type) \
- VectorOfQuantizedTensors all_inputs(*context, *node->inputs); \
- type::Concatenation( \
- RemapDim(NumDimensions(output), axis), all_inputs.data(), \
- all_inputs.dims(), all_inputs.zero_point(), all_inputs.scale(), \
- node->inputs->size, GetTensorData<uint8>(output), GetTensorDims(output), \
- output->params.zero_point, output->params.scale)
+#define TF_LITE_CONCATENATION(type, scalar) \
+ { \
+ VectorOfTensors<scalar> all_inputs(*context, *node->inputs); \
+ tflite::ConcatenationParams op_params; \
+ op_params.axis = axis; \
+ op_params.inputs_count = node->inputs->size; \
+ type::Concatenation(op_params, all_inputs.shapes(), all_inputs.data(), \
+ GetTensorShape(output), \
+ GetTensorData<scalar>(output)); \
+ }
+
+#define TF_LITE_CONCATENATION_QUANTIZED(type) \
+ { \
+ VectorOfQuantizedTensors all_inputs(*context, *node->inputs); \
+ tflite::ConcatenationParams op_params; \
+ op_params.axis = axis; \
+ op_params.input_zeropoint = all_inputs.zero_point(); \
+ op_params.input_scale = all_inputs.scale(); \
+ op_params.inputs_count = node->inputs->size; \
+ op_params.output_zeropoint = output->params.zero_point; \
+ op_params.output_scale = output->params.scale; \
+ type::ConcatenationWithScaling(op_params, all_inputs.shapes(), \
+ all_inputs.data(), GetTensorShape(output), \
+ GetTensorData<uint8>(output)); \
+ }
switch (output->type) { // Already know in/out types are same.
case kTfLiteFloat32: