Diffstat (limited to 'tensorflow/contrib/lite/kernels')
-rw-r--r--  tensorflow/contrib/lite/kernels/conv.cc                           2
-rw-r--r--  tensorflow/contrib/lite/kernels/depthwise_conv.cc                 2
-rw-r--r--  tensorflow/contrib/lite/kernels/fully_connected.cc                2
-rw-r--r--  tensorflow/contrib/lite/kernels/kernel_util.h                     2
-rw-r--r--  tensorflow/contrib/lite/kernels/lsh_projection.cc                 2
-rw-r--r--  tensorflow/contrib/lite/kernels/lstm.cc                           6
-rw-r--r--  tensorflow/contrib/lite/kernels/reshape.cc                       12
-rw-r--r--  tensorflow/contrib/lite/kernels/reshape_test.cc                   2
-rw-r--r--  tensorflow/contrib/lite/kernels/test_util.cc                      4
-rw-r--r--  tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc   2
10 files changed, 18 insertions, 18 deletions
diff --git a/tensorflow/contrib/lite/kernels/conv.cc b/tensorflow/contrib/lite/kernels/conv.cc
index e0cd12f1b4..b91ba1a03d 100644
--- a/tensorflow/contrib/lite/kernels/conv.cc
+++ b/tensorflow/contrib/lite/kernels/conv.cc
@@ -64,7 +64,7 @@ struct OpData {
TfLitePaddingValues padding;
// The scaling factor from input to output (aka the 'real multiplier') can
- // be represented as a fixed point multipler plus a left shift.
+ // be represented as a fixed point multiplier plus a left shift.
int32_t output_multiplier;
int output_shift;
// The range of the fused activation layer. For example for kNone and
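Editor's note: the comment fixed above describes TF Lite's standard quantization trick of folding a floating-point rescaling factor into an integer multiply plus a shift. A minimal sketch of that decomposition follows; it mirrors the idea behind QuantizeMultiplierSmallerThanOne in TF Lite's quantization_util, but the names and rounding details here are illustrative, not the exact library API.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Sketch: split a real multiplier in (0, 1) into a Q31 fixed point
// multiplier and a shift, so that
//   real_multiplier ~= quantized_multiplier * 2^(-31) * 2^(shift).
void DecomposeMultiplier(double real_multiplier, int32_t* quantized_multiplier,
                         int* shift) {
  int exponent;
  // frexp returns q in [0.5, 1) with real_multiplier == q * 2^exponent.
  const double q = std::frexp(real_multiplier, &exponent);
  auto q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // Rounding overflowed; renormalize.
    q_fixed /= 2;
    ++exponent;
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
  *shift = exponent;  // Non-positive here, i.e. a right shift in practice.
}

int main() {
  int32_t m;
  int s;
  DecomposeMultiplier(0.75, &m, &s);
  // 0.75 == 0.75 * 2^0, so m ~= round(0.75 * 2^31) and s == 0.
  std::printf("multiplier=%d shift=%d\n", m, s);
}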
diff --git a/tensorflow/contrib/lite/kernels/depthwise_conv.cc b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
index cad9ce114c..15dbfe08c8 100644
--- a/tensorflow/contrib/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
@@ -52,7 +52,7 @@ enum KernelType {
struct OpData {
TfLitePaddingValues padding;
// The scaling factor from input to output (aka the 'real multiplier') can
- // be represented as a fixed point multipler plus a left shift.
+ // be represented as a fixed point multiplier plus a left shift.
int32_t output_multiplier;
int output_shift;
// The range of the fused activation layer. For example for kNone and
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index 888e67966c..a77fe94e49 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -48,7 +48,7 @@ enum KernelType {
struct OpData {
// The scaling factor from input to output (aka the 'real multiplier') can
- // be represented as a fixed point multipler plus a left shift.
+ // be represented as a fixed point multiplier plus a left shift.
int32_t output_multiplier;
int output_shift;
// The range of the fused activation layer. For example for kNone and
diff --git a/tensorflow/contrib/lite/kernels/kernel_util.h b/tensorflow/contrib/lite/kernels/kernel_util.h
index 21da1daff7..28f53b9fbb 100644
--- a/tensorflow/contrib/lite/kernels/kernel_util.h
+++ b/tensorflow/contrib/lite/kernels/kernel_util.h
@@ -58,7 +58,7 @@ inline bool IsConstantTensor(TfLiteTensor* tensor) {
}
// Determines whether tensor is dynamic. Note that a tensor can be non-const and
-// not dynamic. This function specificially checks for a dynamic tensor.
+// not dynamic. This function specifically checks for a dynamic tensor.
inline bool IsDynamicTensor(TfLiteTensor* tensor) {
return tensor->allocation_type == kTfLiteDynamic;
}
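Editor's note: the comment fixed above points out that const-ness and dynamism are independent axes of TfLiteTensor::allocation_type. A small illustrative helper (not part of the library) spelling out the three states these predicates distinguish, using the enum values from the repo's context.h of this era:

#include "tensorflow/contrib/lite/context.h"

// kTfLiteMmapRo: weights mapped read-only from the model file (constant).
// kTfLiteArenaRw: activations planned ahead in the arena (non-const, not
//                 dynamic). kTfLiteDynamic: buffer (re)allocated at Eval.
inline const char* DescribeTensor(TfLiteTensor* t) {
  if (t->allocation_type == kTfLiteMmapRo) return "constant";
  if (t->allocation_type == kTfLiteDynamic) return "dynamic";
  return "non-const, statically planned";  // e.g. kTfLiteArenaRw
}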
diff --git a/tensorflow/contrib/lite/kernels/lsh_projection.cc b/tensorflow/contrib/lite/kernels/lsh_projection.cc
index 0ee35775d5..5f73b56ed9 100644
--- a/tensorflow/contrib/lite/kernels/lsh_projection.cc
+++ b/tensorflow/contrib/lite/kernels/lsh_projection.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// LSH Projection projects an input to a bit vector via locality senstive
+// LSH Projection projects an input to a bit vector via locality sensitive
// hashing.
//
// Options:
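Editor's note: for readers unfamiliar with the kernel named above, here is a generic sign-projection illustration of locality sensitive hashing. It is not TF Lite's exact scheme (which seeds a hash function per output bit); it only shows the core idea that nearby inputs agree on most output bits. All names are illustrative.

#include <cstdint>
#include <vector>

// Each output bit is the sign of the input's dot product with one fixed
// random plane. Assumes at most 32 planes.
uint32_t SignProjection(const std::vector<float>& input,
                        const std::vector<std::vector<float>>& planes) {
  uint32_t bits = 0;
  for (size_t i = 0; i < planes.size(); ++i) {
    float dot = 0.f;
    for (size_t j = 0; j < input.size(); ++j) dot += planes[i][j] * input[j];
    if (dot >= 0.f) bits |= (1u << i);
  }
  return bits;
}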
diff --git a/tensorflow/contrib/lite/kernels/lstm.cc b/tensorflow/contrib/lite/kernels/lstm.cc
index 8cf1165135..b9255b23a5 100644
--- a/tensorflow/contrib/lite/kernels/lstm.cc
+++ b/tensorflow/contrib/lite/kernels/lstm.cc
@@ -213,9 +213,9 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
// present.
// 2) If projection weight is present, then projection bias is optional.
// TODO(ghodrat): make sure this is correct.
- const bool projecton_tensors_consistent =
+ const bool projection_tensors_consistent =
((projection_weights != nullptr) || (projection_bias == nullptr));
- TF_LITE_ENSURE(context, projecton_tensors_consistent == true);
+ TF_LITE_ENSURE(context, projection_tensors_consistent == true);
return kTfLiteOk;
}
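Editor's note: the renamed flag encodes an implication: a projection bias is only legal when projection weights are present. A quick standalone check that (weights != nullptr) || (bias == nullptr) is exactly "bias present implies weights present":

#include <cassert>

// Truth table for consistent = (weights || !bias), reading each operand as
// "tensor is present". The only rejected state is a bias with no weights.
int main() {
  struct Case { bool weights, bias, expected; } cases[] = {
      {true, true, true},    // weights + bias: fine
      {true, false, true},   // weights only: bias is optional
      {false, false, true},  // neither: fine
      {false, true, false},  // bias without weights: inconsistent
  };
  for (const auto& c : cases) {
    assert((c.weights || !c.bias) == c.expected);
  }
}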
@@ -357,7 +357,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const int n_output = recurrent_to_output_weights->dims->data[1];
// Since we have already checked that weights are all there or none, we can
- // check the existense of only one to the get the condition.
+ // check the existence of only one to get the condition.
const bool use_cifg = (input_to_input_weights == nullptr);
const bool use_peephole = (cell_to_output_weights != nullptr);
diff --git a/tensorflow/contrib/lite/kernels/reshape.cc b/tensorflow/contrib/lite/kernels/reshape.cc
index 438f70d311..f3e6ddc9f4 100644
--- a/tensorflow/contrib/lite/kernels/reshape.cc
+++ b/tensorflow/contrib/lite/kernels/reshape.cc
@@ -49,20 +49,20 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteIntArray* output_size = TfLiteIntArrayCreate(params->num_dimensions);
int num_output_elements = 1;
- int strech_dim = -1;
+ int stretch_dim = -1;
for (int i = 0; i < params->num_dimensions; ++i) {
int value = params->shape[i];
if (value == -1) {
- TF_LITE_ENSURE_EQ(context, strech_dim, -1);
- strech_dim = i;
+ TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
+ stretch_dim = i;
} else {
num_output_elements *= value;
output_size->data[i] = value;
}
}
- if (strech_dim != -1) {
- output_size->data[strech_dim] = num_input_elements / num_output_elements;
- num_output_elements *= output_size->data[strech_dim];
+ if (stretch_dim != -1) {
+ output_size->data[stretch_dim] = num_input_elements / num_output_elements;
+ num_output_elements *= output_size->data[stretch_dim];
}
TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
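Editor's note: the loop in this hunk infers at most one -1 ("stretch") dimension from the element count. A standalone sketch of the same inference, with hypothetical names and a worked case: 8 input elements reshaped to {2, -1} resolve to {2, 4}.

#include <cassert>
#include <vector>

// Resolve a requested shape that may contain a single -1 wildcard, given
// the total element count of the input. Mirrors the Prepare() logic above.
std::vector<int> ResolveShape(std::vector<int> shape, int num_input_elements) {
  int num_output_elements = 1;
  int stretch_dim = -1;
  for (int i = 0; i < static_cast<int>(shape.size()); ++i) {
    if (shape[i] == -1) {
      assert(stretch_dim == -1);  // At most one wildcard is allowed.
      stretch_dim = i;
    } else {
      num_output_elements *= shape[i];
    }
  }
  if (stretch_dim != -1) {
    shape[stretch_dim] = num_input_elements / num_output_elements;
    num_output_elements *= shape[stretch_dim];
  }
  assert(num_output_elements == num_input_elements);
  return shape;
}

int main() {
  // 2 * 4 == 8 elements, so {2, -1} resolves to {2, 4}.
  std::vector<int> out = ResolveShape({2, -1}, 8);
  assert(out[0] == 2 && out[1] == 4);
}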
diff --git a/tensorflow/contrib/lite/kernels/reshape_test.cc b/tensorflow/contrib/lite/kernels/reshape_test.cc
index aecbd0399f..0fbcf6e6aa 100644
--- a/tensorflow/contrib/lite/kernels/reshape_test.cc
+++ b/tensorflow/contrib/lite/kernels/reshape_test.cc
@@ -60,7 +60,7 @@ TEST(ReshapeOpTest, TooManyDimensions) {
TEST(ReshapeOpTest, TooManySpecialDimensions) {
EXPECT_DEATH(ReshapeOpModel({1, 2, 4, 1}, {-1, -1, 2, 4}),
- "stretch_dim != -1");
+ "strech_dim != -1");
}
TEST(ReshapeOpTest, SimpleTest) {
diff --git a/tensorflow/contrib/lite/kernels/test_util.cc b/tensorflow/contrib/lite/kernels/test_util.cc
index 0bb28b50b2..373310bd87 100644
--- a/tensorflow/contrib/lite/kernels/test_util.cc
+++ b/tensorflow/contrib/lite/kernels/test_util.cc
@@ -141,8 +141,8 @@ void SingleOpModel::SetBuiltinOp(BuiltinOperator type,
void SingleOpModel::SetCustomOp(
const string& name, const std::vector<uint8_t>& custom_option,
- const std::function<TfLiteRegistration*()>& registeration) {
- custom_registrations_[name] = registeration;
+ const std::function<TfLiteRegistration*()>& registration) {
+ custom_registrations_[name] = registration;
opcodes_.push_back(
CreateOperatorCodeDirect(builder_, BuiltinOperator_CUSTOM, name.data()));
operators_.push_back(CreateOperator(
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
index 42941a97db..508a570e2e 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
@@ -360,7 +360,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const int n_output = recurrent_to_output_weights->dims->data[1];
// Since we have already checked that weights are all there or none, we can
- // check the existense of only one to the get the condition.
+ // check the existence of only one to get the condition.
const bool use_cifg = (input_to_input_weights == nullptr);
const bool use_peephole = (cell_to_output_weights != nullptr);