From dfcec822728c6569914db37eb55a78a019866e6f Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Mon, 9 Jul 2018 08:26:51 -0700
Subject: Create shared op params for pool op kernels.

PiperOrigin-RevId: 203769116
---
 tensorflow/contrib/lite/kernels/pooling.cc | 98 +++++++++++++++++++-----------
 1 file changed, 63 insertions(+), 35 deletions(-)

diff --git a/tensorflow/contrib/lite/kernels/pooling.cc b/tensorflow/contrib/lite/kernels/pooling.cc
index 7240fe04cc..9b0487ae16 100644
--- a/tensorflow/contrib/lite/kernels/pooling.cc
+++ b/tensorflow/contrib/lite/kernels/pooling.cc
@@ -126,13 +126,19 @@ void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
   float activation_min, activation_max;
   CalculateActivationRange(params->activation, &activation_min,
                            &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
-  type::AveragePool(GetTensorData<float>(input), GetTensorShape(input), \
-                    params->stride_width, params->stride_height, \
-                    data->padding.width, data->padding.height, \
-                    params->filter_width, params->filter_height, \
-                    activation_min, activation_max, \
-                    GetTensorData<float>(output), GetTensorShape(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.float_activation_min = activation_min; \
+  op_params.float_activation_max = activation_max; \
+  type::AveragePool(op_params, GetTensorShape(input), \
+                    GetTensorData<float>(input), GetTensorShape(output), \
+                    GetTensorData<float>(output))
   if (kernel_type == kReference) {
     TF_LITE_AVERAGE_POOL(reference_ops);
   } else {
@@ -149,13 +155,19 @@ void AverageEvalQuantized(TfLiteContext* context, TfLiteNode* node,
   int32_t activation_max;
   CalculateActivationRangeUint8(params->activation, output, &activation_min,
                                 &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
-  type::AveragePool(GetTensorData<uint8_t>(input), GetTensorShape(input), \
-                    params->stride_width, params->stride_height, \
-                    data->padding.width, data->padding.height, \
-                    params->filter_width, params->filter_height, \
-                    activation_min, activation_max, \
-                    GetTensorData<uint8_t>(output), GetTensorShape(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.quantized_activation_min = activation_min; \
+  op_params.quantized_activation_max = activation_max; \
+  type::AveragePool(op_params, GetTensorShape(input), \
+                    GetTensorData<uint8_t>(input), GetTensorShape(output), \
+                    GetTensorData<uint8_t>(output))
   if (kernel_type == kReference) {
     TF_LITE_AVERAGE_POOL(reference_ops);
   } else {
@@ -171,13 +183,18 @@ void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
   float activation_min, activation_max;
   CalculateActivationRange(params->activation, &activation_min,
                            &activation_max);
-#define TF_LITE_MAX_POOL(type) \
-  type::MaxPool(GetTensorData<float>(input), GetTensorShape(input), \
-                params->stride_width, params->stride_height, \
-                data->padding.width, data->padding.height, \
-                params->filter_width, params->filter_height, activation_min, \
-                activation_max, GetTensorData<float>(output), \
-                GetTensorShape(output))
+#define TF_LITE_MAX_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.float_activation_min = activation_min; \
+  op_params.float_activation_max = activation_max; \
+  type::MaxPool(op_params, GetTensorShape(input), GetTensorData<float>(input), \
+                GetTensorShape(output), GetTensorData<float>(output))
   if (kernel_type == kReference) {
     TF_LITE_MAX_POOL(reference_ops);
   } else {
@@ -194,13 +211,19 @@ void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
   int32_t activation_max;
   CalculateActivationRangeUint8(params->activation, output, &activation_min,
                                 &activation_max);
-#define TF_LITE_MAX_POOL(type) \
-  type::MaxPool(GetTensorData<uint8_t>(input), GetTensorShape(input), \
-                params->stride_width, params->stride_height, \
-                data->padding.width, data->padding.height, \
-                params->filter_width, params->filter_height, activation_min, \
-                activation_max, GetTensorData<uint8_t>(output), \
-                GetTensorShape(output))
+#define TF_LITE_MAX_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.quantized_activation_min = activation_min; \
+  op_params.quantized_activation_max = activation_max; \
+  type::MaxPool(op_params, GetTensorShape(input), \
+                GetTensorData<uint8_t>(input), GetTensorShape(output), \
+                GetTensorData<uint8_t>(output))
   if (kernel_type == kReference) {
     TF_LITE_MAX_POOL(reference_ops);
   } else {
@@ -216,13 +239,18 @@ void L2EvalFloat(TfLiteContext* context, TfLiteNode* node,
   float activation_min, activation_max;
   CalculateActivationRange(params->activation, &activation_min,
                            &activation_max);
-#define TF_LITE_L2_POOL(type) \
-  type::L2Pool(GetTensorData<float>(input), GetTensorShape(input), \
-               params->stride_width, params->stride_height, \
-               data->padding.width, data->padding.height, \
-               params->filter_width, params->filter_height, activation_min, \
-               activation_max, GetTensorData<float>(output), \
-               GetTensorShape(output))
+#define TF_LITE_L2_POOL(type) \
+  tflite::PoolParams op_params; \
+  op_params.stride_height = params->stride_height; \
+  op_params.stride_width = params->stride_width; \
+  op_params.filter_height = params->filter_height; \
+  op_params.filter_width = params->filter_width; \
+  op_params.padding_values.height = data->padding.height; \
+  op_params.padding_values.width = data->padding.width; \
+  op_params.float_activation_min = activation_min; \
+  op_params.float_activation_max = activation_max; \
+  type::L2Pool(op_params, GetTensorShape(input), GetTensorData<float>(input), \
+               GetTensorShape(output), GetTensorData<float>(output))
   if (kernel_type == kReference) {
     TF_LITE_L2_POOL(reference_ops);
   } else {
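
All five macros above now funnel their arguments through one tflite::PoolParams object and then call the pool op with the same (params, input shape, input data, output shape, output data) signature. As a reading aid, the sketch below shows what that shared struct plausibly contains, reconstructed only from the fields this patch assigns; the authoritative definition lives in the TF Lite internal headers, and exact member types, ordering, and any members not exercised here (for example a padding-type or activation enum) are assumptions.

#include <cstdint>

namespace tflite {

// Assumed sketch: the field set mirrors the assignments made in the updated
// TF_LITE_*_POOL macros; everything else about the real struct is not shown.
struct PaddingValues {
  int width;   // filled from data->padding.width
  int height;  // filled from data->padding.height
};

struct PoolParams {
  PaddingValues padding_values;
  int stride_height;
  int stride_width;
  int filter_height;
  int filter_width;
  // Clamp range used by the uint8 kernels (AverageEvalQuantized, MaxEvalQuantized).
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // Clamp range used by the float kernels (AverageEvalFloat, MaxEvalFloat, L2EvalFloat).
  float float_activation_min;
  float float_activation_max;
};

}  // namespace tflite

Packing the strides, filter dimensions, padding, and activation clamps into this single struct is what lets AveragePool, MaxPool, and L2Pool in both reference_ops and optimized_ops share one calling convention instead of each taking ten positional scalar arguments.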