aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/core/kernels/softmax_op.cc
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <nobody@tensorflow.org>2016-03-17 08:30:51 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2016-03-18 08:44:53 -0700
commit6ff94239ef72159d50edcba664c4081bb70b4fde (patch)
tree67f108efe8cbabb8fd27939dcffb94109ada296b /tensorflow/core/kernels/softmax_op.cc
parent307f58ccbb567ba030de66850ecadb1dd2d4868d (diff)
Move Relu and SoftMax Op declarations into header files so they can be registered by experimental devices.
Right now tensorflow/core/kernels explicitly depends on all Eigen devices that might want to implement any of the templated Eigen Ops. This is because the template classes that need to be specialized are defined in .cc files, so the specializations themselves have to appear there too. Moving the classes to .h files allows us to use arbitrary Eigen devices defined outside of tensorflow/core, which fits better with the intent behind core/kernels. Over time more kernels may need to be refactored this way for the same reason. Change: 117452814
Diffstat (limited to 'tensorflow/core/kernels/softmax_op.cc')
-rw-r--r--tensorflow/core/kernels/softmax_op.cc18
1 file changed, 0 insertions, 18 deletions
diff --git a/tensorflow/core/kernels/softmax_op.cc b/tensorflow/core/kernels/softmax_op.cc
index 52d246eae3..cfcfeb5760 100644
--- a/tensorflow/core/kernels/softmax_op.cc
+++ b/tensorflow/core/kernels/softmax_op.cc
@@ -28,24 +28,6 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
-template <typename Device, typename T>
-class SoftmaxOp : public OpKernel {
- public:
- explicit SoftmaxOp(OpKernelConstruction* context) : OpKernel(context) {}
-
- void Compute(OpKernelContext* context) override {
- const Tensor& logits_in = context->input(0);
- OP_REQUIRES(context, TensorShapeUtils::IsMatrix(logits_in.shape()),
- errors::InvalidArgument("logits must be 2-dimensional"));
- Tensor* softmax_out = nullptr;
- OP_REQUIRES_OK(
- context, context->allocate_output(0, logits_in.shape(), &softmax_out));
- functor::SoftmaxFunctor<Device, T> functor;
- functor(context->eigen_device<Device>(), logits_in.matrix<T>(),
- softmax_out->matrix<T>());
- }
-};
-
// Partial specialization for a CPUDevice, that uses the Eigen implementation
// from SoftmaxEigenImpl.
namespace functor {