aboutsummaryrefslogtreecommitdiffhomepage
path: root/unsupported
diff options
context:
space:
mode:
authorGravatar Benoit Steiner <benoit.steiner.goog@gmail.com>2016-01-14 21:45:14 -0800
committerGravatar Benoit Steiner <benoit.steiner.goog@gmail.com>2016-01-14 21:45:14 -0800
commitaed4cb1269d52d0ff0e69c8aa6d89c804185b18f (patch)
tree996e3289b09bca3831a5e2128a0e78a15f9f0e4c /unsupported
parent8fe2532e70a8e0261717003d96d4df41ab978756 (diff)
Use warp shuffles instead of shared memory access to speedup the inner reduction kernel.
Diffstat (limited to 'unsupported')
-rw-r--r--unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h22
1 file changed, 8 insertions, 14 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index 54ab34ba1..82ea09f07 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -132,8 +132,6 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
};
-extern __shared__ float temp[];
-
template <int NumPerThread, typename Self,
typename Reducer, typename Index>
__global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
@@ -183,17 +181,13 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
}
}
- temp[threadIdx.x] = reduced_val;
-
- __syncthreads();
- const int warp_id = threadIdx.x & 31;
- if (warp_id < 16) reducer.reduce(temp[threadIdx.x + 16], &temp[threadIdx.x]);
- if (warp_id < 8) reducer.reduce(temp[threadIdx.x + 8], &temp[threadIdx.x]);
- if (warp_id < 4) reducer.reduce(temp[threadIdx.x + 4], &temp[threadIdx.x]);
- if (warp_id < 2) reducer.reduce(temp[threadIdx.x + 2], &temp[threadIdx.x]);
- if (warp_id < 1) {
- reducer.reduce(temp[threadIdx.x + 1], &temp[threadIdx.x]);
- atomicReduce(&(output[row]), temp[threadIdx.x], reducer);
+#pragma unroll
+ for (int offset = warpSize/2; offset > 0; offset /= 2) {
+ reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
+ }
+
+ if ((threadIdx.x & (warpSize - 1)) == 0) {
+ atomicReduce(&(output[row]), reduced_val, reducer);
}
}
@@ -224,7 +218,7 @@ struct InnerReducer<Self, Op, GpuDevice> {
EIGEN_UNUSED_VARIABLE(num_blocks)
LAUNCH_CUDA_KERNEL((InnerReductionKernel<num_per_thread, Self, Op, Index>),
- num_blocks, block_size, block_size*sizeof(float), device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+ num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
}
};