author    Benoit Steiner <benoit.steiner.goog@gmail.com>  2016-05-17 09:13:27 -0700
committer Benoit Steiner <benoit.steiner.goog@gmail.com>  2016-05-17 09:13:27 -0700
commit    8d06c02ffd9eb43194311d0e21b8618d3a8f4937 (patch)
tree      3536e517927555d667504b70e5b6f087697aae86 /unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
parent    a80d875916de350c1849cd97d8b2515f620911d4 (diff)
Allow vectorized padding on GPU. This helps speed things up a little.
Before:
  BM_padding/10    5000000   460    217.03 MFlops/s
  BM_padding/80    5000000   460    13899.40 MFlops/s
  BM_padding/640   5000000   461    888421.17 MFlops/s
  BM_padding/4K    5000000   460    54316322.55 MFlops/s
After:
  BM_padding/10    5000000   454    220.20 MFlops/s
  BM_padding/80    5000000   455    14039.86 MFlops/s
  BM_padding/640   5000000   452    904968.83 MFlops/s
  BM_padding/4K    5000000   411    60750049.21 MFlops/s
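The padding change itself lands outside this file (the diffstat below is limited to TensorReductionCuda.h), but the gist of the optimization can be sketched as a standalone CUDA kernel: wherever a full packet fits inside the padded output, four scalar stores are replaced by a single 16-byte float4 store, with a scalar fallback for the tail. This is an illustrative sketch only, not Eigen code; the kernel name, the 1-D shape, and the zero padding value are made up for the example.

__global__ void pad1d_vectorized(const float* in, float* out,
                                 int in_size, int pad_left, int out_size) {
  // Each thread is responsible for four consecutive outputs.
  const int base = 4 * (blockIdx.x * blockDim.x + threadIdx.x);
  if (base + 3 < out_size) {
    // Gather four values (positions inside the padding read as zero),
    // then issue one vectorized 16-byte store. cudaMalloc'ed buffers are
    // aligned far beyond the 16 bytes float4 requires.
    float4 v;
    v.x = (base + 0 >= pad_left && base + 0 < pad_left + in_size) ? in[base + 0 - pad_left] : 0.f;
    v.y = (base + 1 >= pad_left && base + 1 < pad_left + in_size) ? in[base + 1 - pad_left] : 0.f;
    v.z = (base + 2 >= pad_left && base + 2 < pad_left + in_size) ? in[base + 2 - pad_left] : 0.f;
    v.w = (base + 3 >= pad_left && base + 3 < pad_left + in_size) ? in[base + 3 - pad_left] : 0.f;
    reinterpret_cast<float4*>(out)[base / 4] = v;
  } else {
    // Scalar tail when out_size is not a multiple of 4.
    for (int k = base; k < out_size; ++k)
      out[k] = (k >= pad_left && k < pad_left + in_size) ? in[k - pad_left] : 0.f;
  }
}

Launched with divup(out_size, 4) threads in total, this writes every output exactly once; the same idea, applied through Eigen's packet interface, is what produces the gain in the numbers above.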
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h | 126
1 file changed, 98 insertions(+), 28 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index 63646dfc2..2a5c24e2b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -361,7 +361,7 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
}
#ifdef EIGEN_HAS_CUDA_FP16
-/*
+
template <int NumPerThread, typename Self,
typename Reducer, typename Index>
__global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
@@ -375,7 +375,7 @@ __global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input,
eigen_assert(NumPerThread % unroll_times == 0);
eigen_assert(unroll_times % 2 == 0);
- const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
+ const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread/2);
const Index num_input_blocks = input_col_blocks * num_preserved_coeffs;
const Index num_threads = blockDim.x * gridDim.x;
@@ -383,40 +383,44 @@ __global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input,
// Initialize the output values if they weren't initialized by the ReductionInitKernel
if (gridDim.x == 1) {
- Index i = thread_id;
- for (; i < num_preserved_coeffs; i += 2*num_threads) {
- ((half2*)output)[i] = reducer.initializePacket();
+ Index i = 2*thread_id;
+ for (; i + 1 < num_preserved_coeffs; i += 2*num_threads) {
+ ((half2*)output)[i] = reducer.template initializePacket<half2>();
}
- if (i + 1 < num_preserved_coeffs) {
+ if (i < num_preserved_coeffs) {
output[i] = reducer.initialize();
}
__syncthreads();
}
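// Each iteration of the grid-stride loop below now covers two adjacent
// rows, so partial results can be kept and written as half2 packets.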
- for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
+ for (Index i = 2*blockIdx.x; i < num_input_blocks; i += 2*gridDim.x) {
const Index row = i / input_col_blocks;
if (row + 1 < num_preserved_coeffs) {
const Index col_block = i % input_col_blocks;
const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x;
- half2 reduced_val1 = reducer.initializePacket();
- half2 reduced_val2 = reducer.initializePacket();
+ half2 reduced_val1 = reducer.template initializePacket<half2>();
+ half2 reduced_val2 = reducer.template initializePacket<half2>();
for (Index j = 0; j < NumPerThread; j += unroll_times) {
const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1);
if (last_col >= num_coeffs_to_reduce) {
Index col = col_begin + blockDim.x * j;
for (; col + 1 < num_coeffs_to_reduce; col += blockDim.x) {
- const half2 val = input.m_impl.packet(row * num_coeffs_to_reduce + col);
- reducer.reduce(val, &reduced_val);
- // do the same for reduce val2 here
+ const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val1, &reduced_val1);
+ const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
+ reducer.reducePacket(val2, &reduced_val2);
}
if (col < num_coeffs_to_reduce) {
// Peel;
- const half last = input.m_impl.coeff(row * num_coeffs_to_reduce + col+1);
- const half2 val = __halves2half2(last, reducer.initialize());
- reducer.reducePacket(val, &reduced_val);
+ const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col+1);
+ const half2 val1 = __halves2half2(last1, reducer.initialize());
+ reducer.reducePacket(val1, &reduced_val1);
+ const half last2 = input.m_impl.coeff((row+1) * num_coeffs_to_reduce + col+1);
+ const half2 val2 = __halves2half2(last2, reducer.initialize());
+ reducer.reducePacket(val2, &reduced_val2);
}
break;
} else {
@@ -424,41 +428,44 @@ __global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input,
#pragma unroll
for (int k = 0; k < unroll_times; ++k) {
const Index col = col_begin + blockDim.x * (j + k);
- reducer.reduce(input.m_impl.packet(row * num_coeffs_to_reduce + col), &reduced_val);
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col), &reduced_val1);
+ reducer.reducePacket(input.m_impl.template packet<Unaligned>((row +1)* num_coeffs_to_reduce + col), &reduced_val2);
}
}
}
#pragma unroll
for (int offset = warpSize/2; offset > 0; offset /= 2) {
- reducer.reducePacket(__shfl_down(reduced_val, offset, warpSize), &reduced_val);
+ reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
+ reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
}
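+ // Fold the two half lanes of each packet into one scalar per row,
+ // then re-pack the two per-row results into a single half2.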
+ half val1 = __low2half(reduced_val1);
+ reducer.reduce(__high2half(reduced_val1), &val1);
+ half val2 = __low2half(reduced_val2);
+ reducer.reduce(__high2half(reduced_val2), &val2);
+ half2 val = __halves2half2(val1, val2);
+
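// Only the first lane of each warp writes out: one half2 atomic
// commits both rows' results at once.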
if ((threadIdx.x & (warpSize - 1)) == 0) {
- if (row + 1 < num_preserved_coeffs) {
- atomicReduce(&(output[row]), reduced_val, reducer);
- }
- else {
- atomicReduce(scratch, reduced_val, reducer);
- }
+ atomicReduce(&(((half2*)output)[row]), val, reducer);
}
}
}
}
-*/
+
#endif
template <typename Self, typename Op>
-struct InnerReducer<Self, Op, GpuDevice> {
+struct InnerReductionLauncher {
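// The launcher is split out of InnerReducer so that float and half output
// types can dispatch to different kernels; the GpuDevice specialization of
// InnerReducer (added below) simply forwards here.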
// Unfortunately nvidia doesn't support well exotic types such as complex,
// so reduce the scope of the optimized version of the code to the simple case
// of floats.
static const bool HasOptimizedImplementation = !Op::IsStateful &&
internal::is_same<typename Self::CoeffReturnType, float>::value;
- template <typename Device, typename OutputType>
- static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
- assert(false && "Should only be called to reduce floats on a gpu device");
+ template <typename OutputType>
+ static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) {
+ assert(false && "Should only be called to reduce floats and half floats on a gpu device");
return true;
}
@@ -495,9 +502,72 @@ struct InnerReducer<Self, Op, GpuDevice> {
return false;
}
+
+#ifdef EIGEN_HAS_CUDA_FP16
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ typedef typename Self::Index Index;
+
+ // It's faster to use the usual code.
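+ // Returning true makes the caller fall back to the generic reduction path.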
+ if (num_coeffs_to_reduce <= 32) {
+ return true;
+ }
+
+ const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+ const int block_size = 256;
+ const int num_per_thread = 128;
+ const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / block_size;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
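+ // e.g. with 2^20 coefficients: dyn_blocks = divup(2^20, 256*128) = 32,
+ // then capped at the number of blocks the device can keep resident.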
+ half2* scratch = static_cast<half2*>(device.scratchpad());
+
+ if (num_blocks > 1) {
+ // We initialize the outputs outside the reduction kernel when we can't be sure that there
+ // won't be a race conditions between multiple thread blocks.
+ const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
+ const int max_blocks = device.getNumCudaMultiProcessors() *
+ device.maxCudaThreadsPerMultiProcessor() / 1024;
+ const int num_blocks = numext::mini<int>(max_blocks, dyn_blocks);
+ LAUNCH_CUDA_KERNEL((ReductionInitKernelHalfFloat<Self, Op, Index>),
+ 1, 1, 0, device, reducer, self, num_preserved_vals, scratch);
+ }
+
+ LAUNCH_CUDA_KERNEL((InnerReductionKernelHalfFloat<num_per_thread, Self, Op, Index>),
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output, scratch);
+
+ return false;
+ }
+#endif
};
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, GpuDevice> {
+ // Unfortunately nvidia doesn't support well exotic types such as complex,
+ // so reduce the scope of the optimized version of the code to the simple case
+ // of floats and half floats.
+#ifdef EIGEN_HAS_CUDA_FP16
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ (internal::is_same<typename Self::CoeffReturnType, float>::value ||
+ internal::is_same<typename Self::CoeffReturnType, Eigen::half>::value);
+#else
+ static const bool HasOptimizedImplementation = !Op::IsStateful &&
+ internal::is_same<typename Self::CoeffReturnType, float>::value;
+#endif
+
+ template <typename OutputType>
+ static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+ assert(HasOptimizedImplementation && "Should only be called on floats or half floats");
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
+ // Don't crash when we're called with an input tensor of size 0.
+ if (num_coeffs == 0) {
+ return true;
+ }
+
+ return InnerReductionLauncher<Self, Op>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals);
+ }
+};
+
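For reference, a reduction that exercises the new fp16 path would look roughly like this. A minimal sketch for a .cu file, assuming a build with EIGEN_HAS_CUDA_FP16 defined; CudaStreamDevice and GpuDevice come from the tensor module itself, while the function and variable names are made up for the example.

#include <unsupported/Eigen/CXX11/Tensor>

void reduce_inner_dim_fp16(int rows, int cols) {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  Eigen::half* d_in;
  Eigen::half* d_out;
  cudaMalloc(reinterpret_cast<void**>(&d_in),  rows * cols * sizeof(Eigen::half));
  cudaMalloc(reinterpret_cast<void**>(&d_out), cols * sizeof(Eigen::half));

  Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2> > in(d_in, rows, cols);
  Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1> > out(d_out, cols);

  // Reducing the innermost dimension (dim 0 in the default column-major
  // layout) goes through InnerReducer<Self, Op, GpuDevice>, which now
  // forwards half outputs to InnerReductionLauncher. Note that rows must
  // exceed 32 for the specialized half2 kernel to kick in (see the
  // num_coeffs_to_reduce <= 32 early-out above).
  Eigen::array<int, 1> red_dim;
  red_dim[0] = 0;
  out.device(gpu_device) = in.sum(red_dim);

  cudaFree(d_in);
  cudaFree(d_out);
}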
template <int NumPerThread, typename Self,
typename Reducer, typename Index>
__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,