about summary refs log tree commit diff homepage
path: root/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2018-05-31 15:11:26 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-05-31 15:13:58 -0700
commit269a4ed1c27251b55cffe578b7bd969ec5975487 (patch)
tree83dd602a71ad69b3fcb7b5ff5adc59c7adac3758 /tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
parentf21816ecefe3f6e554d3b7daae3bb7f7a03bad20 (diff)
Internal change.
PiperOrigin-RevId: 198787391
Diffstat (limited to 'tensorflow/contrib/lite/kernels/internal/kernel_utils.cc')
-rw-r--r-- tensorflow/contrib/lite/kernels/internal/kernel_utils.cc | 7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
index 3bbaaa6a9d..67e3810479 100644
--- a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
@@ -52,7 +52,8 @@ void RnnBatchStep(const float* input_ptr_batch, const int8_t* input_weights_ptr,
TfLiteFusedActivation activation,
int8_t* quantized_input_ptr_batch,
int8_t* quantized_hidden_state_ptr_batch,
- float* hidden_state_ptr_batch, float* output_ptr_batch) {
+ float* scaling_factors, float* hidden_state_ptr_batch,
+ float* output_ptr_batch) {
// Output = bias
tensor_utils::VectorBatchVectorAssign(bias_ptr, num_units, batch_size,
output_ptr_batch);
@@ -62,7 +63,6 @@ void RnnBatchStep(const float* input_ptr_batch, const int8_t* input_weights_ptr,
// Quantize input from float to uint8 + quantization params (scaling
// factor).
float unused_min, unused_max;
- float* scaling_factors = new float[batch_size];
for (int b = 0; b < batch_size; ++b) {
const int offset = b * input_size;
tensor_utils::SymmetricQuantizeFloats(
@@ -76,7 +76,6 @@ void RnnBatchStep(const float* input_ptr_batch, const int8_t* input_weights_ptr,
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
input_weights_ptr, num_units, input_size, quantized_input_ptr_batch,
scaling_factors, batch_size, output_ptr_batch, /*result_stride=*/1);
- delete[] scaling_factors;
}
// Save quantization and matmul computation for all zero input.
@@ -84,7 +83,6 @@ void RnnBatchStep(const float* input_ptr_batch, const int8_t* input_weights_ptr,
batch_size * num_units)) {
// Quantize hidden_state
float unused_min, unused_max;
- float* scaling_factors = new float[batch_size];
for (int b = 0; b < batch_size; ++b) {
const int offset = b * num_units;
tensor_utils::SymmetricQuantizeFloats(
@@ -99,7 +97,6 @@ void RnnBatchStep(const float* input_ptr_batch, const int8_t* input_weights_ptr,
recurrent_weights_ptr, num_units, num_units,
quantized_hidden_state_ptr_batch, scaling_factors, batch_size,
output_ptr_batch, /*result_stride=*/1);
- delete[] scaling_factors;
}
// Output = activation(Output) and update hidden_state