path: root/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
author Benoit Steiner <benoit.steiner.goog@gmail.com> 2016-05-26 11:53:59 -0700
committer Benoit Steiner <benoit.steiner.goog@gmail.com> 2016-05-26 11:53:59 -0700
commit c1c7f06c35f9e8164af1b1ff4d3c507f05372707 (patch)
tree bc525578864ca64d6eb8584c7625442373eb3cfb /unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
parent 22d02c98557d2bd9afd581d7ad7c9c144a8da671 (diff)
Improved the performance of inner reductions.
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h')
-rw-r--r-- unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h | 168
1 file changed, 127 insertions(+), 41 deletions(-)
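
The core of the change shows up in InnerReductionKernelHalfFloat below: each thread now reduces two adjacent output rows at once into a pair of half2 packets, collapses them with a warp-level tree reduction via __shfl_down, and folds the two results into the output with a single aligned half2 atomic per warp. As a rough illustration of that reduce-then-atomic pattern, here is a minimal standalone sketch; it is not part of the patch, uses plain float and a hypothetical RowSumKernel to keep the intrinsics simple, and stands in for what the patched kernel does on half2 packets.

// Illustrative sketch only -- not part of this patch. The inner-reduction
// kernels below combine per-thread partials with a warp-level tree reduction
// (__shfl_down) followed by one atomic per warp. The same pattern is shown
// here for a plain float row sum; the patched kernel performs the analogous
// steps on half2 packets, two output rows at a time.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void RowSumKernel(const float* input, float* output,
                             int num_cols, int num_rows) {
  const int row = blockIdx.y;
  if (row >= num_rows) return;

  // Each thread accumulates a strided slice of its row.
  float partial = 0.f;
  for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < num_cols;
       col += blockDim.x * gridDim.x) {
    partial += input[row * num_cols + col];
  }

  // Warp-level tree reduction: after log2(warpSize) steps lane 0 holds the
  // sum of the warp's 32 partials. (__shfl_down is the pre-CUDA-9 intrinsic
  // used by this file; newer code would use __shfl_down_sync.)
  for (int offset = warpSize / 2; offset > 0; offset /= 2) {
    partial += __shfl_down(partial, offset, warpSize);
  }

  // A single atomic per warp folds the warp's result into the output slot,
  // which is what lets several blocks cooperate on one row.
  if ((threadIdx.x & (warpSize - 1)) == 0) {
    atomicAdd(&output[row], partial);
  }
}

int main() {
  const int rows = 4, cols = 1000;
  std::vector<float> h_in(rows * cols, 1.f), h_out(rows);
  float *d_in, *d_out;
  cudaMalloc(&d_in, rows * cols * sizeof(float));
  cudaMalloc(&d_out, rows * sizeof(float));
  cudaMemcpy(d_in, h_in.data(), rows * cols * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, rows * sizeof(float));

  dim3 grid(8, rows);  // several blocks per row, one y-slot per row
  RowSumKernel<<<grid, 128>>>(d_in, d_out, cols, rows);

  cudaMemcpy(h_out.data(), d_out, rows * sizeof(float), cudaMemcpyDeviceToHost);
  printf("row 0 sum = %.0f (expected %d)\n", h_out[0], cols);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

Because several blocks may be assigned to the same row, the per-warp atomic is what keeps concurrent updates correct; the patch applies the same reasoning when it sizes its grid and pre-initializes the outputs.
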
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index c0a36cd9c..30d481cbe 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -147,8 +147,9 @@ __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num
#ifdef EIGEN_HAS_CUDA_FP16
template <typename Self,
typename Reducer, typename Index>
-__global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch) {
- eigen_assert(threadIdx.x == 1);
+__global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch) {
+ eigen_assert(blockDim.x == 1);
+ eigen_assert(gridDim.x == 1);
if (num_coeffs % 2 != 0) {
half last = input.m_impl.coeff(num_coeffs-1);
*scratch = __halves2half2(last, reducer.initialize());
@@ -157,6 +158,21 @@ __global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input,
}
}
+template <typename Self,
+ typename Reducer, typename Index>
+__global__ void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) {
+ const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+ const Index num_threads = blockDim.x * gridDim.x;
+ const Index num_packets = num_coeffs / 2;
+ for (Index i = thread_id; i < num_packets; i += num_threads) {
+ ((half2*)output)[i] = reducer.template initializePacket<half2>();
+ }
+
+ if (thread_id == 0 && num_coeffs % 2 != 0) {
+ output[num_coeffs-1] = reducer.initialize();
+ }
+}
+
template <int BlockSize, int NumPerThread, typename Self,
typename Reducer, typename Index>
__global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs,
@@ -251,7 +267,7 @@ struct FullReductionLauncher {
if (num_blocks > 1) {
// We initialize the output and the scratchpad outside the reduction kernel when we can't be sure that there
// won't be race conditions between multiple thread blocks.
- LAUNCH_CUDA_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
+ LAUNCH_CUDA_KERNEL((ReductionInitFullReduxKernelHalfFloat<Self, Op, Index>),
1, 1, 0, device, reducer, self, num_coeffs, scratch);
}
@@ -361,11 +377,11 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
}
#ifdef EIGEN_HAS_CUDA_FP16
-/*
+
template <int NumPerThread, typename Self,
typename Reducer, typename Index>
__global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
- half* output, half2* scratch) {
+ half* output) {
eigen_assert(blockDim.y == 1);
eigen_assert(blockDim.z == 1);
eigen_assert(gridDim.y == 1);
@@ -375,101 +391,105 @@ __global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input,
eigen_assert(NumPerThread % unroll_times == 0);
eigen_assert(unroll_times % 2 == 0);
- const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
- const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;
+ const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2);
+ const Index num_input_blocks = divup<Index>(input_col_blocks * num_preserved_coeffs, 2);
const Index num_threads = blockDim.x * gridDim.x;
const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize the output values if they weren't initialized by the ReductionInitKernel
if (gridDim.x == 1) {
- Index i = thread_id;
- for (; i < num_preserved_coeffs; i += 2*num_threads) {
- ((half2*)output)[i] = reducer.initializePacket();
+ Index i = 2*thread_id;
+ for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
+ half* loc = output + i;
+ *((half2*)loc) = reducer.template initializePacket<half2>();
}
- if (i + 1 < num_preserved_coeffs) {
+ if (i < num_preserved_coeffs) {
output[i] = reducer.initialize();
}
__syncthreads();
}
for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
- const Index row = i / input_col_blocks;
+ const Index row = 2 * (i / input_col_blocks);
if (row + 1 < num_preserved_coeffs) {
const Index col_block = i % input_col_blocks;
- const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;
+ const Index col_begin = 2 * (col_block * blockDim.x * NumPerThread + threadIdx.x);
- half2 reduced_val1 = reducer.initializePacket();
- half2 reduced_val2 = reducer.initializePacket();
+ half2 reduced_val1 = reducer.template initializePacket<half2>();
+ half2 reduced_val2 = reducer.template initializePacket<half2>();
for (Index j = 0; j < NumPerThread; j += unroll_times) {
- const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
+ const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1) * 2;
if (last_col >= num_coeffs_to_reduce) {
Index col = col_begin + blockDim.x * j;
for (; col + 1 < num_coeffs_to_reduce; col += blockDim.x) {
- const half2 val = input.m_impl.packet(row * num_coeffs_to_reduce + col);
- reducer.reduce(val, &reduced_val);
- // do the same for reduce val2 here
+ const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val1, &reduced_val1);
+ const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val2, &reduced_val2);
}
if (col < num_coeffs_to_reduce) {
// Peel;
- const half last = input.m_impl.coeff(row * num_coeffs_to_reduce + col+1);
- const half2 val = __halves2half2(last, reducer.initialize());
- reducer.reducePacket(val, &reduced_val);
+ const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
+ const half2 val1 = __halves2half2(last1, reducer.initialize());
+ reducer.reducePacket(val1, &reduced_val1);
+ const half last2 = input.m_impl.coeff((row+1) * num_coeffs_to_reduce + col);
+ const half2 val2 = __halves2half2(last2, reducer.initialize());
+ reducer.reducePacket(val2, &reduced_val2);
}
break;
} else {
// Faster version of the loop with no branches after unrolling.
#pragma unroll
for (int k = 0; k < unroll_times; ++k) {
- const Index col = col_begin + blockDim.x * (j + k);
- reducer.reduce(input.m_impl.packet(row * num_coeffs_to_reduce + col), &reduced_val);
+ const Index col = col_begin + blockDim.x * (j + k) * 2;
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col), &reduced_val1);
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>((row + 1)* num_coeffs_to_reduce + col), &reduced_val2);
}
}
}
#pragma unroll
for (int offset = warpSize/2; offset > 0; offset /= 2) {
- reducer.reducePacket(__shfl_down(reduced_val, offset, warpSize), &reduced_val);
+ reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
+ reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
}
+ half val1 = __low2half(reduced_val1);
+ reducer.reduce(__high2half(reduced_val1), &val1);
+ half val2 = __low2half(reduced_val2);
+ reducer.reduce(__high2half(reduced_val2), &val2);
+ half2 val = __halves2half2(val1, val2);
+
if ((threadIdx.x & (warpSize - 1)) == 0) {
- if (row + 1 < num_preserved_coeffs) {
- atomicReduce(&(output[row]), reduced_val, reducer);
- }
- else {
- atomicReduce(scratch, reduced_val, reducer);
- }
+ half* loc = output + row;
+ atomicReduce((half2*)loc, val, reducer);
}
}
}
}
-*/
+
#endif
template <typename Self, typename Op>
-struct InnerReducer<Self, Op, GpuDevice> {
+struct InnerReductionLauncher {
// Unfortunately nvidia doesn't support exotic types such as complex well,
// so we reduce the scope of the optimized version of the code to the simple case
// of floats.
static const bool HasOptimizedImplementation = !Op::IsStateful &&
internal::is_same<typename Self::CoeffReturnType, float>::value;
- template <typename Device, typename OutputType>
- static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
- assert(false && "Should only be called to reduce floats on a gpu device");
+ template <typename OutputType>
+ static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) {
+ assert(false && "Should only be called to reduce floats and half floats on a gpu device");
return true;
}
static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
typedef typename Self::Index Index;
- // It's faster to use the usual code.
- if (num_coeffs_to_reduce <= 32) {
- return true;
- }
-
const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
const int block_size = 256;
const int num_per_thread = 128;
@@ -495,9 +515,75 @@ struct InnerReducer<Self, Op, GpuDevice> {
return false;
}
+
+#ifdef EIGEN_HAS_CUDA_FP16
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ if (num_preserved_vals % 2 != 0) {
+ // Not supported yet, revert to the slower code path
+ std::cout << "BYPASSING OPTIMIZED CODE PATH" << std::endl;
+ return true;
+ }
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = /*256*/128;
+ const int num_per_thread = /*128*/64;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+
+ if (num_blocks > 1) {
+ // We initialize the outputs outside the reduction kernel when we can't be sure that there
+ // won't be race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_CUDA_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
+ 1, 1, 0, device, reducer, self, num_preserved_vals, output);
+ }
+
+ LAUNCH_CUDA_KERNEL((InnerReductionKernelHalfFloat<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+
+ return false;
+ }
+#endif
};
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, GpuDevice> {
+ // Unfortunately nvidia doesn't support exotic types such as complex well,
+ // so we reduce the scope of the optimized version of the code to the simple case
+ // of floats and half floats.
+#ifdef EIGEN_HAS_CUDA_FP16
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value);
+#else
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ internal::is_same<typename Self::CoeffReturnType, float>::value;
+#endif
+
+ template <typename OutputType>
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ assert(HasOptimizedImplementation && "Should only be called on floats or half floats");
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ // Don't crash when we're called with an input tensor of size 0.
+ if (num_coeffs == 0) {
+ return true;
+ }
+ // It's faster to use the usual code.
+ if (num_coeffs_to_reduce <= 128) {
+ return true;
+ }
+ return InnerReductionLauncher<Self, Op>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals);
+ }
+};
+
template <int NumPerThread, typename Self,
typename Reducer, typename Index>
__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,