author    A. Unique TensorFlower <gardener@tensorflow.org>    2018-06-21 14:34:04 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-06-21 14:36:16 -0700
commit    dc0160a9b25cfb68d8a47d54634eda34e398019e (patch)
tree      70c0c28979b309681441176a0f2deea6aae9a2ad /tensorflow/contrib/lite/kernels/conv.cc
parent    39a66ecbe0f195625a83f6e7ccfc4b3e987c3bf4 (diff)
Changed some variable names from camel case to underscore for consistency.
PiperOrigin-RevId: 201587899
Diffstat (limited to 'tensorflow/contrib/lite/kernels/conv.cc')
-rw-r--r--    tensorflow/contrib/lite/kernels/conv.cc    41
1 file changed, 21 insertions, 20 deletions
diff --git a/tensorflow/contrib/lite/kernels/conv.cc b/tensorflow/contrib/lite/kernels/conv.cc
index 14b399ef96..93267f9a4f 100644
--- a/tensorflow/contrib/lite/kernels/conv.cc
+++ b/tensorflow/contrib/lite/kernels/conv.cc
@@ -179,9 +179,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_STATUS(AllocateTemporaryTensorsIfRequired(context, node));
- bool hasBias = node->inputs->size == 3;
+ bool has_bias = node->inputs->size == 3;
// Check number of inputs/outputs
- TF_LITE_ENSURE(context, hasBias || node->inputs->size == 2);
+ TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
@@ -204,9 +204,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// TODO(ahentz): At this point the optimized versions require 'bias'. We can
// either change that or document that convolution requires it.
- TF_LITE_ENSURE(context, hasBias);
+ TF_LITE_ENSURE(context, has_bias);
- if (hasBias) {
+ if (has_bias) {
bias = &context->tensors[node->inputs->data[2]];
if (data_type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32);
@@ -226,29 +226,30 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Matching GetWindowedOutputSize in TensorFlow.
auto padding = params->padding;
- auto computeOutSize = [padding](int imageSize, int filterSize, int stride,
- int dilationRate) -> int {
- int effectiveFilterSize = (filterSize - 1) * dilationRate + 1;
+ auto compute_out_size = [padding](int image_size, int filter_size, int stride,
+ int dilation_rate) -> int {
+ int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
return padding == kTfLitePaddingSame
- ? (imageSize + stride - 1) / stride
+ ? (image_size + stride - 1) / stride
: padding == kTfLitePaddingValid
- ? (imageSize - effectiveFilterSize + stride) / stride
+ ? (image_size - effective_filter_size + stride) / stride
: 0;
};
- int outWidth = computeOutSize(width, filter_width, params->stride_width,
- params->dilation_width_factor);
- int outHeight = computeOutSize(height, filter_height, params->stride_height,
- params->dilation_height_factor);
+ int out_width = compute_out_size(width, filter_width, params->stride_width,
+ params->dilation_width_factor);
+ int out_height =
+ compute_out_size(height, filter_height, params->stride_height,
+ params->dilation_height_factor);
data->padding.height =
ComputePadding(params->stride_height, params->dilation_height_factor,
- height, filter_height, outHeight);
+ height, filter_height, out_height);
data->padding.width =
ComputePadding(params->stride_width, params->dilation_width_factor, width,
- filter_width, outWidth);
+ filter_width, out_width);
- TF_LITE_ENSURE(context, hasBias);
+ TF_LITE_ENSURE(context, has_bias);
// Note that quantized inference requires that all tensors have their
// parameters set. This is usually done during quantized training.
@@ -267,8 +268,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = batches;
- output_size->data[1] = outHeight;
- output_size->data[2] = outWidth;
+ output_size->data[1] = out_height;
+ output_size->data[2] = out_width;
output_size->data[3] = channels_out;
auto output_status = context->ResizeTensor(context, output, output_size);
@@ -458,9 +459,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
TfLiteTensor* filter = &context->tensors[node->inputs->data[1]];
- bool hasBias = node->inputs->size == 3;
+ bool has_bias = node->inputs->size == 3;
TfLiteTensor* bias =
- hasBias ? &context->tensors[node->inputs->data[2]] : nullptr;
+ has_bias ? &context->tensors[node->inputs->data[2]] : nullptr;
TfLiteTensor* im2col =
data->need_im2col
? &context->tensors[node->temporaries->data[data->im2col_index]]
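
For context on the hunks above: the compute_out_size lambda implements the standard SAME/VALID output-size arithmetic (matching GetWindowedOutputSize in TensorFlow, per the comment in the diff). Below is a minimal standalone sketch of that same arithmetic. It is illustrative only: the local Padding enum stands in for TfLitePadding, and the 224/3/2/1 sample dimensions are not part of the commit.

// Standalone sketch of the SAME/VALID output-size arithmetic used by
// compute_out_size in the diff above. The local Padding enum is a
// stand-in for TfLitePadding; the formulas mirror the lambda.
#include <cstdio>

enum class Padding { kSame, kValid };

// With SAME padding the output covers ceil(image_size / stride) positions;
// with VALID it only covers positions where the (dilated) filter fits
// entirely inside the image.
int ComputeOutSize(Padding padding, int image_size, int filter_size,
                   int stride, int dilation_rate) {
  int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  switch (padding) {
    case Padding::kSame:
      return (image_size + stride - 1) / stride;  // integer ceil division
    case Padding::kValid:
      return (image_size - effective_filter_size + stride) / stride;
  }
  return 0;
}

int main() {
  // Worked example: 224-wide input, 3-wide filter, stride 2, no dilation.
  // SAME  -> ceil(224 / 2)     = 112
  // VALID -> (224 - 3 + 2) / 2 = 111
  std::printf("SAME:  %d\n", ComputeOutSize(Padding::kSame, 224, 3, 2, 1));
  std::printf("VALID: %d\n", ComputeOutSize(Padding::kValid, 224, 3, 2, 1));
  return 0;
}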