author    A. Unique TensorFlower <gardener@tensorflow.org>  2016-09-28 00:15:58 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2016-09-28 01:35:32 -0700
commit    419d5d072375ee0044fecb94e4bfe21a7b3b0b9e (patch)
tree      cb66e6e7238bf2e7938b58f3638bd31f65d542c2 /tensorflow/core/kernels/quantized_bias_add_op.cc
parent    c1e4f0f6a1078fd6715e8145fbef874e4d447ab8 (diff)
Automated rollback of change 134501895
Change: 134506649
Diffstat (limited to 'tensorflow/core/kernels/quantized_bias_add_op.cc')
-rw-r--r--  tensorflow/core/kernels/quantized_bias_add_op.cc  89
1 file changed, 0 insertions, 89 deletions
diff --git a/tensorflow/core/kernels/quantized_bias_add_op.cc b/tensorflow/core/kernels/quantized_bias_add_op.cc
deleted file mode 100644
index 0b34bfcad8..0000000000
--- a/tensorflow/core/kernels/quantized_bias_add_op.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// Implements a quantized eight-bit version of the bias addition operation.
-
-#include "tensorflow/core/kernels/quantization_utils.h"
-#include "tensorflow/core/framework/numeric_op.h"
-#include "tensorflow/core/framework/op_kernel.h"
-#include "tensorflow/core/framework/tensor.h"
-#include "tensorflow/core/kernels/ops_util.h"
-#include "tensorflow/core/lib/core/errors.h"
-
-namespace tensorflow {
-
-typedef Eigen::ThreadPoolDevice CPUDevice;
-
-template <class T1, class T2, class T3>
-class QuantizedBiasAddOp : public OpKernel {
- public:
- explicit QuantizedBiasAddOp(OpKernelConstruction* context)
- : OpKernel(context) {}
-
- void Compute(OpKernelContext* context) override {
- const Tensor& input = context->input(0);
- const Tensor& bias = context->input(1);
- const float input_min = context->input(2).flat<float>()(0);
- const float input_max = context->input(3).flat<float>()(0);
- const float bias_min = context->input(4).flat<float>()(0);
- const float bias_max = context->input(5).flat<float>()(0);
-
- OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
- errors::InvalidArgument("Input tensor must be at least 2D: ",
- input.shape().DebugString()));
- OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()),
- errors::InvalidArgument("Biases must be 1D: ",
- bias.shape().DebugString()));
- const auto last_dim = input.shape().dims() - 1;
- OP_REQUIRES(
- context, bias.shape().dim_size(0) == input.shape().dim_size(last_dim),
- errors::InvalidArgument(
- "Must provide as many biases as the last dimension "
- "of the input tensor: ",
- bias.shape().DebugString(), " vs. ", input.shape().DebugString()));
-
- Tensor* output = nullptr;
- OP_REQUIRES_OK(context,
- context->allocate_output(0, input.shape(), &output));
-
- float total_min;
- float total_max;
- QuantizedAddUsingEigen<T1, T2, T3>(
- context->template eigen_device<CPUDevice>(), input, input_min,
- input_max, bias, bias_min, bias_max, output, &total_min, &total_max);
-
- Tensor* output_min = nullptr;
- OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
- output_min->flat<float>()(0) = total_min;
-
- Tensor* output_max = nullptr;
- OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
- output_max->flat<float>()(0) = total_max;
- }
-};
-
-REGISTER_KERNEL_BUILDER(Name("QuantizedBiasAdd")
- .Device(DEVICE_CPU)
- .TypeConstraint<quint8>("T1")
- .TypeConstraint<quint8>("T2")
- .TypeConstraint<qint32>("out_type"),
- QuantizedBiasAddOp<quint8, quint8, qint32>);
-REGISTER_KERNEL_BUILDER(Name("QuantizedBiasAdd")
- .Device(DEVICE_CPU)
- .TypeConstraint<qint8>("T1")
- .TypeConstraint<qint8>("T2")
- .TypeConstraint<qint32>("out_type"),
- QuantizedBiasAddOp<qint8, qint8, qint32>);
-} // namespace tensorflow
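
For context on what the deleted kernel computed, the sketch below is a minimal, standalone C++ illustration rather than the TensorFlow kernel or its quantization_utils.h helpers. It shows the op's contract: eight-bit inputs and biases carry float [min, max] ranges, the element-wise sum is formed in float here for clarity, and a combined range is reported alongside the result, much as QuantizedAddUsingEigen fills in total_min and total_max above. The function names and the simple affine dequantization scheme are illustrative assumptions.

// Standalone sketch of the quantized bias-add contract; not the TensorFlow
// implementation. The dequantization formula and the "sum of ranges" output
// range below are simplifying assumptions for illustration only.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Map an unsigned 8-bit quantized value back to a float in [min, max].
float DequantizeU8(uint8_t q, float min, float max) {
  const float scale = (max - min) / 255.0f;
  return min + static_cast<float>(q) * scale;
}

int main() {
  // One row of "input" and a matching bias vector, with their float ranges.
  std::vector<uint8_t> input = {0, 128, 255};
  std::vector<uint8_t> bias = {64, 64, 64};
  const float input_min = 0.0f, input_max = 6.0f;
  const float bias_min = -1.0f, bias_max = 1.0f;

  // The widest range the sum can occupy is the sum of the two input ranges;
  // a real kernel reports something like this as output_min / output_max so
  // that the 32-bit result can be interpreted or requantized downstream.
  const float total_min = input_min + bias_min;
  const float total_max = input_max + bias_max;

  for (std::size_t i = 0; i < input.size(); ++i) {
    const float sum = DequantizeU8(input[i], input_min, input_max) +
                      DequantizeU8(bias[i], bias_min, bias_max);
    std::cout << "element " << i << ": " << sum << "\n";
  }
  std::cout << "output range: [" << total_min << ", " << total_max << "]\n";
  return 0;
}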