author    Deven Desai <deven.desai.amd@gmail.com>  2018-07-11 10:39:54 -0400
committer Deven Desai <deven.desai.amd@gmail.com>  2018-07-11 10:39:54 -0400
commit    876f392c396318f33454168db36ed54308e54e0d (patch)
tree      a727bc91873b5c0aeec05312176a0f39e2cb64d5 /unsupported
parent    1fe0b749042320501c59378f2860d9322b0c6e19 (diff)
Updates corresponding to the latest round of PR feedback
The major changes are:

1. Moving CUDA/PacketMath.h to GPU/PacketMath.h
2. Moving CUDA/MathFunctions.h to GPU/MathFunctions.h
3. Moving CUDA/CudaSpecialFunctions.h to GPU/GpuSpecialFunctions.h

   The above three changes effectively enable the Eigen "Packet" layer for the HIP platform.

4. Merging the "hip_basic" and "cuda_basic" unit tests into one ("gpu_basic")
5. Updating the EIGEN_DEVICE_FUNC markings in some places

The change has been tested on the HIP and CUDA platforms.
Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h         |  14
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h              | 147
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h           |   5
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h         |   1
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h                   |   5
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h                    |   5
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h                   |  11
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h                |  41
-rw-r--r--  unsupported/Eigen/SpecialFunctions                                      |   4
-rw-r--r--  unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h  |   6
10 files changed, 34 insertions, 205 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
index a1e292108..8c1af1da8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
@@ -28,6 +28,20 @@ class TensorContractionBlocking {
typedef typename LhsMapper::Scalar LhsScalar;
typedef typename RhsMapper::Scalar RhsScalar;
+ /*
+ Adding EIGEN_DEVICE_FUNC unconditionally to the `TensorContractionBlocking` constructor in `TensorContractionBlocking.h`
+ requires adding EIGEN_DEVICE_FUNC to `computeProductBlockingSizes` in `GeneralBlockPanelKernel.h`,
+ which in turn requires adding EIGEN_DEVICE_FUNC to `evaluateProductBlockingSizesHeuristic` in `GeneralBlockPanelKernel.h`,
+ which in turn requires adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
+ (otherwise HIPCC errors out).
+
+ However, adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
+ results in NVCC erroring out with the following error:
+
+ ../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901:
+ dynamic initialization is not supported for function-scope static variables within a __device__/__global__ function
+ */
+
#if !defined(EIGEN_HIPCC)
EIGEN_DEVICE_FUNC
#endif
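For context, the NVCC error quoted in the comment above is triggered by a function-scope static variable that needs runtime ("dynamic") initialization inside a function compiled for the device. The following is a minimal, hypothetical sketch of that pattern, not Eigen's actual `manage_caching_sizes` code; the `CacheSizes` struct and `l1_cache_size` function are made up for illustration, and the translation unit is expected to fail device compilation under nvcc:

// Hypothetical sketch of the pattern behind NVCC error #2901.
#include <cstdio>

struct CacheSizes {                 // stand-in for cache-size bookkeeping
  int l1, l2, l3;
  // non-trivial constructor => the static below needs dynamic initialization
  CacheSizes() : l1(32 * 1024), l2(256 * 1024), l3(2 * 1024 * 1024) {}
};

__host__ __device__ int l1_cache_size() {
  // nvcc: error #2901: dynamic initialization is not supported for
  // function-scope static variables within a __device__/__global__ function
  static CacheSizes sizes;
  return sizes.l1;
}

int main() { std::printf("%d\n", l1_cache_size()); return 0; }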
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
index 238754424..a4f92ee44 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
@@ -546,45 +546,6 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
-#if defined(EIGEN_HIPCC)
-
-#define prefetch_lhs(reg, row, col) \
- if (!CHECK_LHS_BOUNDARY) { \
- if (col < k_size) { \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- reg.z =lhs(row + 2, col); \
- reg.w =lhs(row + 3, col); \
- } \
- } else { \
- if (col < k_size) { \
- if (row + 3 < m_size) { \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- reg.z =lhs(row + 2, col); \
- reg.w =lhs(row + 3, col); \
- } else if (row + 2 < m_size) { \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- reg.z =lhs(row + 2, col); \
- } else if (row + 1 < m_size) { \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- } else if (row < m_size) { \
- reg.x =lhs(row + 0, col); \
- } \
- } \
- } \
-
-#define prefetch_rhs_hipcc(reg, row, col) \
- reg.x =rhs(row + 0, col); \
- reg.y =rhs(row + 1, col); \
- reg.z =rhs(row + 2, col); \
- reg.w =rhs(row + 3, col); \
-
-
-#else
-
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
@@ -607,19 +568,12 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
} \
} \
-#endif
-
Index lhs_vert = base_m+threadIdx.x*4;
for (Index k = 0; k < k_size; k += 16) {
-#if defined(EIGEN_HIPCC)
- lhs_pf0 = make_float4(0, 0, 0, 0);
- rhs_pf0 = make_float4(0, 0, 0, 0);
-#else
lhs_pf0 = internal::pset1<float4>(0);
rhs_pf0 = internal::pset1<float4>(0);
-#endif
Index lhs_horiz = threadIdx.y+k;
prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
@@ -630,11 +584,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
- prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
-#endif
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -649,11 +599,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
} else {
if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
-#endif
} else if ((rhs_vert + 2) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
@@ -753,10 +699,6 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
#undef prefetch_lhs
#undef add_vals
-#if defined(EIGEN_HIPCC)
-#undef prefetch_rhs_hipcc
-#endif
-
Index horiz_base = threadIdx.y*4+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
for (int i = 0; i < 4; i++) {
@@ -845,33 +787,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
-#if defined(EIGEN_HIPCC)
-
-#define prefetch_lhs_hipcc(reg, row, col) \
- reg.x =lhs(row + 0, col); \
- reg.y =lhs(row + 1, col); \
- reg.z =lhs(row + 2, col); \
- reg.w =lhs(row + 3, col);
-
-#define prefetch_rhs_hipcc(reg, row, col) \
- reg.x =rhs(row + 0, col); \
- reg.y =rhs(row + 1, col); \
- reg.z =rhs(row + 2, col); \
- reg.w =rhs(row + 3, col);
-
-#endif
-
Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
for (Index k = 0; k < k_size; k += 32) {
-#if defined(EIGEN_HIPCC)
- lhs_pf0 = make_float4(0, 0, 0, 0);
- lhs_pf1 = make_float4(0, 0, 0, 0);
- lhs_pf2 = make_float4(0, 0, 0, 0);
- lhs_pf3 = make_float4(0, 0, 0, 0);
-
- rhs_pf0 = make_float4(0, 0, 0, 0);
- rhs_pf1 = make_float4(0, 0, 0, 0);
-#else
lhs_pf0 = internal::pset1<float4>(0);
lhs_pf1 = internal::pset1<float4>(0);
lhs_pf2 = internal::pset1<float4>(0);
@@ -879,85 +796,40 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
rhs_pf0 = internal::pset1<float4>(0);
rhs_pf1 = internal::pset1<float4>(0);
-#endif
if (!CHECK_LHS_BOUNDARY) {
if ((threadIdx.y/4+k+24) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
- prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
- prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
- prefetch_lhs_hipcc(lhs_pf3, lhs_vert, (threadIdx.y/4+k+24))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
-#endif
} else if ((threadIdx.y/4+k+16) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
- prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
- prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
-#endif
} else if ((threadIdx.y/4+k+8) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
- prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
-#endif
} else if ((threadIdx.y/4+k) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
-#endif
}
} else {
// just CHECK_LHS_BOUNDARY
if (lhs_vert + 3 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
- prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
- prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
- prefetch_lhs_hipcc(lhs_pf3, lhs_vert, (threadIdx.y/4+k+24))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
-#endif
} else if ((threadIdx.y/4+k+16) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
- prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
- prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
-#endif
} else if ((threadIdx.y/4+k+8) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
- prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
-#endif
} else if ((threadIdx.y/4+k) < k_size) {
-#if defined(EIGEN_HIPCC)
- prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-#else
lhs_pf0 =lhs.template loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
-#endif
}
} else if (lhs_vert + 2 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
@@ -1046,13 +918,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
- prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
- prefetch_rhs_hipcc(rhs_pf1, rhs_vert, rhs_horiz1)
-#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
-#endif
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1074,13 +941,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (rhs_horiz1 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
- prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
- prefetch_rhs_hipcc(rhs_pf1, rhs_vert, rhs_horiz1)
-#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
-#endif
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1101,11 +963,7 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
} else if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
- prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-#else
rhs_pf0 = rhs.template loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
-#endif
} else if ((rhs_vert + 2) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1213,11 +1071,6 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
__syncthreads();
} // end loop over k
-#if defined(EIGEN_HIPCC)
-#undef prefetch_lhs_hipcc
-#undef prefetch_rhs_hipcc
-#endif
-
__syncthreads();
Index horiz_base = (threadIdx.y/4)*8+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
index f009ae855..9966955f7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
@@ -32,8 +32,7 @@
#define gpuGetDeviceCount hipGetDeviceCount
#define gpuGetErrorString hipGetErrorString
#define gpuGetDeviceProperties hipGetDeviceProperties
-// FIXME : use hipStreamDefault instead of 0x00
-#define gpuStreamDefault 0x00
+#define gpuStreamDefault hipStreamDefault
#define gpuGetDevice hipGetDevice
#define gpuSetDevice hipSetDevice
#define gpuMalloc hipMalloc
@@ -47,6 +46,7 @@
#define gpuSharedMemConfig hipSharedMemConfig
#define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig
#define gpuStreamSynchronize hipStreamSynchronize
+#define gpuDeviceSynchronize hipDeviceSynchronize
#define gpuMemcpy hipMemcpy
#else
@@ -73,6 +73,7 @@
#define gpuSharedMemConfig cudaSharedMemConfig
#define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig
#define gpuStreamSynchronize cudaStreamSynchronize
+#define gpuDeviceSynchronize cudaDeviceSynchronize
#define gpuMemcpy cudaMemcpy
#endif
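The gpu*-prefixed names above are what the Tensor GPU sources call; this header maps each of them to the HIP runtime when building for HIP and to the CUDA runtime otherwise. Below is a self-contained sketch of the same aliasing idea, reduced to the newly added gpuDeviceSynchronize mapping; the EIGEN_USE_HIP switch and the device_is_idle helper are illustrative assumptions, not taken from the header:

// Illustrative sketch of the alias scheme; the real header maps many more symbols.
#if defined(EIGEN_USE_HIP)                    // assumed switch for this sketch
  #include <hip/hip_runtime.h>
  #define gpuError_t            hipError_t
  #define gpuSuccess            hipSuccess
  #define gpuDeviceSynchronize  hipDeviceSynchronize
#else
  #include <cuda_runtime.h>
  #define gpuError_t            cudaError_t
  #define gpuSuccess            cudaSuccess
  #define gpuDeviceSynchronize  cudaDeviceSynchronize
#endif

// Shared code path: compiles unchanged against either runtime.
inline bool device_is_idle() {
  gpuError_t err = gpuDeviceSynchronize();    // hipDeviceSynchronize() or cudaDeviceSynchronize()
  return err == gpuSuccess;
}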
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
index 9bc0708ed..db394bcbb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
@@ -32,6 +32,7 @@
#undef gpuSharedMemConfig
#undef gpuDeviceSetSharedMemConfig
#undef gpuStreamSynchronize
+#undef gpuDeviceSynchronize
#undef gpuMemcpy
#undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
index 835efbf72..8810d78cf 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
@@ -351,10 +351,7 @@ struct IndexPairList : internal::IndexTuple<FirstType, OtherTypes...> {
namespace internal {
template<typename FirstType, typename... OtherTypes>
- #if defined(EIGEN_HIPCC)
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
- #endif
- size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
size_t result = 1;
for (int i = 0; i < array_size<IndexList<FirstType, OtherTypes...> >::value; ++i) {
result *= sizes[i];
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index 2a979845b..cda49f8fe 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -859,10 +859,7 @@ struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices,
return inputIndex;
}
- #if defined(EIGEN_HIPCC)
- EIGEN_DEVICE_FUNC
- #endif
- static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
#ifndef __SYCL_DEVICE_ONLY__
return numext::maxi(min, numext::mini(max,value));
#else
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index fdd338b96..ce573d730 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -497,6 +497,9 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
EIGEN_STRONG_INLINE
#if !defined(EIGEN_HIPCC)
+ // Marking this as EIGEN_DEVICE_FUNC for HIPCC also requires marking every function called from
+ // here, which leads to a proliferation of EIGEN_DEVICE_FUNC markings, one of which eventually
+ // triggers an NVCC error
EIGEN_DEVICE_FUNC
#endif
bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
@@ -778,17 +781,9 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
// Indexed by reduced dimensions.
array<Index, NumReducedDims> m_reducedDims;
-#if defined(EIGEN_HIPCC)
- public:
-#endif
-
// Evaluator for the input expression.
TensorEvaluator<ArgType, Device> m_impl;
-#if defined(EIGEN_HIPCC)
- private:
-#endif
-
// Operation to apply for computing the reduction.
Op m_reducer;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
index ca854d670..a691e530a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
@@ -169,7 +169,9 @@ __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num
#pragma unroll
for (int offset = warpSize/2; offset > 0; offset /= 2) {
#if defined(EIGEN_HIPCC)
- // XXX use std::is_floating_point to determine the type of accum
+ // use std::is_floating_point to determine the type of accum
+ // This is needed because, when CoeffReturnType == double, hipcc gives a "call to __shfl_down is ambiguous"
+ // error and lists the float and int versions of __shfl_down as the candidate functions.
if (std::is_floating_point<typename Self::CoeffReturnType>::value) {
reducer.reduce(__shfl_down(static_cast<float>(accum), offset, warpSize), &accum);
} else {
@@ -238,20 +240,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
// Initialize the output value if it wasn't initialized by the ReductionInitKernel
-#if defined(EIGEN_HIPCC)
-
- if (gridDim.x == 1 && first_index == 0) {
- if (num_coeffs % 2 != 0) {
- half last = input.m_impl.coeff(num_coeffs-1);
- *scratch = __halves2half2(last, reducer.initialize());
- } else {
- *scratch = reducer.template initializePacket<half2>();
- }
- __syncthreads();
- }
-
-#else
-
if (gridDim.x == 1) {
if (first_index == 0) {
if (num_coeffs % 2 != 0) {
@@ -264,8 +252,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
__syncthreads();
}
-#endif
-
half2 accum = reducer.template initializePacket<half2>();
const Index max_iter = numext::mini<Index>((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
for (Index i = 0; i < max_iter; i += BlockSize) {
@@ -295,17 +281,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
atomicReduce(scratch, accum, reducer);
}
-#if defined(EIGEN_HIPCC)
- __syncthreads();
-
- if (gridDim.x == 1 && first_index == 0) {
- half tmp = __low2half(*scratch);
- reducer.reduce(__high2half(*scratch), &tmp);
- *output = tmp;
- }
-
-#else
-
if (gridDim.x == 1) {
__syncthreads();
if (first_index == 0) {
@@ -314,8 +289,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
*output = tmp;
}
}
-
-#endif
}
template <typename Op>
@@ -485,7 +458,9 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
#pragma unroll
for (int offset = warpSize/2; offset > 0; offset /= 2) {
#if defined(EIGEN_HIPCC)
- // XXX use std::is_floating_point to determine the type of reduced_val
+ // use std::is_floating_point to determine the type of reduced_val
+ // This is needed because, when Type == double, hipcc gives a "call to __shfl_down is ambiguous" error
+ // and lists the float and int versions of __shfl_down as the candidate functions.
if (std::is_floating_point<Type>::value) {
reducer.reduce(__shfl_down(static_cast<float>(reduced_val), offset), &reduced_val);
} else {
@@ -847,8 +822,4 @@ struct OuterReducer<Self, Op, GpuDevice> {
} // end namespace internal
} // end namespace Eigen
-#if defined(EIGEN_HIPCC)
-#undef warpSize
-#endif
-
#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H
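Regarding the __shfl_down comments added in the two reduction kernels above: the workaround keeps the HIP build compiling by shuffling floating-point accumulators as float (accepting the double-to-float narrowing) and everything else as int, so the call is never ambiguous for double. A standalone sketch of that pattern follows; the shuffle_down_workaround helper is hypothetical and not part of Eigen, and it assumes a HIP toolchain whose __shfl_down only provides int and float overloads:

// Hypothetical helper mirroring the pattern used in FullReductionKernel /
// InnerReductionKernel above.
#include <hip/hip_runtime.h>
#include <type_traits>

template <typename T>
__device__ T shuffle_down_workaround(T val, int offset, int width) {
  if (std::is_floating_point<T>::value) {
    // Narrows double to float, the same trade-off made in the kernels above.
    return static_cast<T>(__shfl_down(static_cast<float>(val), offset, width));
  } else {
    return static_cast<T>(__shfl_down(static_cast<int>(val), offset, width));
  }
}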
diff --git a/unsupported/Eigen/SpecialFunctions b/unsupported/Eigen/SpecialFunctions
index 9441ba8f5..44fd99b43 100644
--- a/unsupported/Eigen/SpecialFunctions
+++ b/unsupported/Eigen/SpecialFunctions
@@ -53,8 +53,8 @@ namespace Eigen {
#include "src/SpecialFunctions/SpecialFunctionsFunctors.h"
#include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h"
-#if defined EIGEN_VECTORIZE_CUDA
- #include "src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h"
+#if defined EIGEN_VECTORIZE_GPU
+ #include "src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h"
#endif
namespace Eigen {
diff --git a/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h b/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
index ad011e305..40abcee3a 100644
--- a/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
+++ b/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_CUDA_SPECIALFUNCTIONS_H
-#define EIGEN_CUDA_SPECIALFUNCTIONS_H
+#ifndef EIGEN_GPU_SPECIALFUNCTIONS_H
+#define EIGEN_GPU_SPECIALFUNCTIONS_H
namespace Eigen {
@@ -223,4 +223,4 @@ pi1e<double2>(const double2& x) {
} // end namespace Eigen
-#endif // EIGEN_CUDA_SPECIALFUNCTIONS_H
+#endif // EIGEN_GPU_SPECIALFUNCTIONS_H