about summary refs log tree commit diff homepage
path: root/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
diff options
context:
space:
mode:
author Gravatar A. Unique TensorFlower <gardener@tensorflow.org> 2018-05-11 19:38:48 -0700
committer Gravatar TensorFlower Gardener <gardener@tensorflow.org> 2018-05-11 19:41:29 -0700
commit 52e2698ac969a0f82c6ce901f80f04818ca8ac4e (patch)
tree 5b89fa879c61cac2f3b64d5edbf405ad616edde5 /tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
parent 84b5938aaee991d6909e16e56c66bf88e8843fbb (diff)
Making GetInput from kernel_util.h return a pointer to const data.
PiperOrigin-RevId: 196340200
Diffstat (limited to 'tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc')
-rw-r--r-- tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc 65
1 file changed, 34 insertions, 31 deletions
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
index a35ba23ced..1cd4884696 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
@@ -143,13 +143,13 @@ TfLiteStatus CheckLstmTensorDimensions(
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
}
- TfLiteTensor* input_to_forget_weights =
+ const TfLiteTensor* input_to_forget_weights =
GetInput(context, node, input_to_forget_weights_tensor);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
- TfLiteTensor* input_to_cell_weights =
+ const TfLiteTensor* input_to_cell_weights =
GetInput(context, node, input_to_cell_weights_tensor);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
@@ -165,7 +165,7 @@ TfLiteStatus CheckLstmTensorDimensions(
n_output);
}
- TfLiteTensor* recurrent_to_forget_weights =
+ const TfLiteTensor* recurrent_to_forget_weights =
GetInput(context, node, recurrent_to_forget_weights_tensor);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
@@ -173,7 +173,7 @@ TfLiteStatus CheckLstmTensorDimensions(
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
n_output);
- TfLiteTensor* recurrent_to_cell_weights =
+ const TfLiteTensor* recurrent_to_cell_weights =
GetInput(context, node, recurrent_to_cell_weights_tensor);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
@@ -231,16 +231,17 @@ TfLiteStatus CheckLstmTensorDimensions(
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
}
- TfLiteTensor* forget_gate_bias =
+ const TfLiteTensor* forget_gate_bias =
GetInput(context, node, forget_gate_bias_tensor);
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
- TfLiteTensor* cell_bias = GetInput(context, node, cell_gate_bias_tensor);
+ const TfLiteTensor* cell_bias =
+ GetInput(context, node, cell_gate_bias_tensor);
TF_LITE_ENSURE_EQ(context, cell_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_bias->dims->data[0], n_cell);
- TfLiteTensor* output_gate_bias =
+ const TfLiteTensor* output_gate_bias =
GetInput(context, node, output_gate_bias_tensor);
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
@@ -312,20 +313,20 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Inferring batch size, number of outputs and sequence length and
// number of cells from the input tensors.
- TfLiteTensor* input = GetInput(context, node, kInputTensor);
+ const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input->dims->size > 1);
const int max_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
- TfLiteTensor* fw_input_to_output_weights =
+ const TfLiteTensor* fw_input_to_output_weights =
GetInput(context, node, kFwInputToOutputWeightsTensor);
const int n_fw_cell = fw_input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, fw_input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, fw_input_to_output_weights->dims->data[1],
n_input);
- TfLiteTensor* fw_recurrent_to_output_weights =
+ const TfLiteTensor* fw_recurrent_to_output_weights =
GetInput(context, node, kFwRecurrentToOutputWeightsTensor);
TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->dims->data[0],
@@ -388,14 +389,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_scratch_buffer,
fw_scratch_buffer_size));
// Same for the backward cell.
- TfLiteTensor* bw_input_to_output_weights =
+ const TfLiteTensor* bw_input_to_output_weights =
GetInput(context, node, kBwInputToOutputWeightsTensor);
const int n_bw_cell = bw_input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->dims->data[1],
n_input);
- TfLiteTensor* bw_recurrent_to_output_weights =
+ const TfLiteTensor* bw_recurrent_to_output_weights =
GetInput(context, node, kBwRecurrentToOutputWeightsTensor);
TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->dims->data[0],
@@ -463,7 +464,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
// Input tensor.
- TfLiteTensor* input = GetInput(context, node, kInputTensor);
+ const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const int max_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
@@ -471,20 +472,20 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Tensors for the forward cell.
TfLiteTensor* fw_input_to_input_weights =
GetOptionalInputTensor(context, node, kFwInputToInputWeightsTensor);
- TfLiteTensor* fw_input_to_forget_weights =
+ const TfLiteTensor* fw_input_to_forget_weights =
GetInput(context, node, kFwInputToForgetWeightsTensor);
- TfLiteTensor* fw_input_to_cell_weights =
+ const TfLiteTensor* fw_input_to_cell_weights =
GetInput(context, node, kFwInputToCellWeightsTensor);
- TfLiteTensor* fw_input_to_output_weights =
+ const TfLiteTensor* fw_input_to_output_weights =
GetInput(context, node, kFwInputToOutputWeightsTensor);
TfLiteTensor* fw_recurrent_to_input_weights =
GetOptionalInputTensor(context, node, kFwRecurrentToInputWeightsTensor);
- TfLiteTensor* fw_recurrent_to_forget_weights =
+ const TfLiteTensor* fw_recurrent_to_forget_weights =
GetInput(context, node, kFwRecurrentToForgetWeightsTensor);
- TfLiteTensor* fw_recurrent_to_cell_weights =
+ const TfLiteTensor* fw_recurrent_to_cell_weights =
GetInput(context, node, kFwRecurrentToCellWeightsTensor);
- TfLiteTensor* fw_recurrent_to_output_weights =
+ const TfLiteTensor* fw_recurrent_to_output_weights =
GetInput(context, node, kFwRecurrentToOutputWeightsTensor);
TfLiteTensor* fw_cell_to_input_weights =
@@ -496,10 +497,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* fw_input_gate_bias =
GetOptionalInputTensor(context, node, kFwInputGateBiasTensor);
- TfLiteTensor* fw_forget_gate_bias =
+ const TfLiteTensor* fw_forget_gate_bias =
GetInput(context, node, kFwForgetGateBiasTensor);
- TfLiteTensor* fw_cell_bias = GetInput(context, node, kFwCellGateBiasTensor);
- TfLiteTensor* fw_output_gate_bias =
+ const TfLiteTensor* fw_cell_bias =
+ GetInput(context, node, kFwCellGateBiasTensor);
+ const TfLiteTensor* fw_output_gate_bias =
GetInput(context, node, kFwOutputGateBiasTensor);
TfLiteTensor* fw_projection_weights =
@@ -515,20 +517,20 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Tensors for the backward cell.
TfLiteTensor* bw_input_to_input_weights =
GetOptionalInputTensor(context, node, kBwInputToInputWeightsTensor);
- TfLiteTensor* bw_input_to_forget_weights =
+ const TfLiteTensor* bw_input_to_forget_weights =
GetInput(context, node, kBwInputToForgetWeightsTensor);
- TfLiteTensor* bw_input_to_cell_weights =
+ const TfLiteTensor* bw_input_to_cell_weights =
GetInput(context, node, kBwInputToCellWeightsTensor);
- TfLiteTensor* bw_input_to_output_weights =
+ const TfLiteTensor* bw_input_to_output_weights =
GetInput(context, node, kBwInputToOutputWeightsTensor);
TfLiteTensor* bw_recurrent_to_input_weights =
GetOptionalInputTensor(context, node, kBwRecurrentToInputWeightsTensor);
- TfLiteTensor* bw_recurrent_to_forget_weights =
+ const TfLiteTensor* bw_recurrent_to_forget_weights =
GetInput(context, node, kBwRecurrentToForgetWeightsTensor);
- TfLiteTensor* bw_recurrent_to_cell_weights =
+ const TfLiteTensor* bw_recurrent_to_cell_weights =
GetInput(context, node, kBwRecurrentToCellWeightsTensor);
- TfLiteTensor* bw_recurrent_to_output_weights =
+ const TfLiteTensor* bw_recurrent_to_output_weights =
GetInput(context, node, kBwRecurrentToOutputWeightsTensor);
TfLiteTensor* bw_cell_to_input_weights =
@@ -540,10 +542,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* bw_input_gate_bias =
GetOptionalInputTensor(context, node, kBwInputGateBiasTensor);
- TfLiteTensor* bw_forget_gate_bias =
+ const TfLiteTensor* bw_forget_gate_bias =
GetInput(context, node, kBwForgetGateBiasTensor);
- TfLiteTensor* bw_cell_bias = GetInput(context, node, kBwCellGateBiasTensor);
- TfLiteTensor* bw_output_gate_bias =
+ const TfLiteTensor* bw_cell_bias =
+ GetInput(context, node, kBwCellGateBiasTensor);
+ const TfLiteTensor* bw_output_gate_bias =
GetInput(context, node, kBwOutputGateBiasTensor);
TfLiteTensor* bw_projection_weights =