diff options
author | A. Unique TensorFlower <nobody@tensorflow.org> | 2016-06-06 12:45:58 -0800 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2016-06-06 14:04:47 -0700 |
commit | aba8beebab0b363f03492b3d5653ec14d148f3c3 (patch) | |
tree | 82648ff178ba1bf4fc5232328f2bc650cde27da6 /tensorflow/core/kernels/batch_norm_op.cc | |
parent | e70c452a18403b368ea845cfb654079386a00fd8 (diff) |
Change some kernels to use TF_CALL* macros, so that the instantiations for some
types can be avoided on mobile platform.
Change: 124172890
Diffstat (limited to 'tensorflow/core/kernels/batch_norm_op.cc')
-rw-r--r-- | tensorflow/core/kernels/batch_norm_op.cc | 28 |
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/tensorflow/core/kernels/batch_norm_op.cc b/tensorflow/core/kernels/batch_norm_op.cc index a5f526780f..f4aa759643 100644 --- a/tensorflow/core/kernels/batch_norm_op.cc +++ b/tensorflow/core/kernels/batch_norm_op.cc @@ -159,9 +159,9 @@ class BatchNormGradOp : public OpKernel { .TypeConstraint<T>("T"), \ BatchNormOp<CPUDevice, T>); -REGISTER_KERNEL(Eigen::half); -REGISTER_KERNEL(float); -REGISTER_KERNEL(double); +TF_CALL_half(REGISTER_KERNEL); +TF_CALL_float(REGISTER_KERNEL); +TF_CALL_double(REGISTER_KERNEL); #undef REGISTER_KERNEL #if GOOGLE_CUDA @@ -179,8 +179,8 @@ namespace functor { #define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPEC(T); -DECLARE_GPU_SPECS(Eigen::half); -DECLARE_GPU_SPECS(float); +TF_CALL_half(DECLARE_GPU_SPECS); +TF_CALL_float(DECLARE_GPU_SPECS); #undef DECLARE_GPU_SPEC } // namespace functor @@ -191,8 +191,8 @@ DECLARE_GPU_SPECS(float); .TypeConstraint<T>("T"), \ BatchNormOp<GPUDevice, T>); -REGISTER_GPU_KERNEL(Eigen::half); -REGISTER_GPU_KERNEL(float); +TF_CALL_half(REGISTER_GPU_KERNEL); +TF_CALL_float(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL #endif // GOOGLE_CUDA @@ -203,9 +203,9 @@ REGISTER_GPU_KERNEL(float); .TypeConstraint<T>("T"), \ BatchNormGradOp<CPUDevice, T>); -REGISTER_KERNEL(Eigen::half); -REGISTER_KERNEL(float); -REGISTER_KERNEL(double); +TF_CALL_half(REGISTER_KERNEL); +TF_CALL_float(REGISTER_KERNEL); +TF_CALL_double(REGISTER_KERNEL); #undef REGISTER_KERNEL #if GOOGLE_CUDA @@ -226,8 +226,8 @@ namespace functor { #define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPEC(T); -DECLARE_GPU_SPECS(Eigen::half); -DECLARE_GPU_SPECS(float); +TF_CALL_half(DECLARE_GPU_SPECS); +TF_CALL_float(DECLARE_GPU_SPECS); #undef DECLARE_GPU_SPEC } // namespace functor @@ -238,8 +238,8 @@ DECLARE_GPU_SPECS(float); .TypeConstraint<T>("T"), \ BatchNormGradOp<GPUDevice, T>); -REGISTER_GPU_KERNEL(Eigen::half); -REGISTER_GPU_KERNEL(float); +TF_CALL_half(REGISTER_GPU_KERNEL); +TF_CALL_float(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL #endif // 
GOOGLE_CUDA