author    Gael Guennebaud <g.gael@free.fr>    2013-11-05 15:41:45 +0100
committer Gael Guennebaud <g.gael@free.fr>    2013-11-05 15:41:45 +0100
commit    4f572e4c14445158bd9e58c2ba651528847053d6 (patch)
tree      c351dc6516ae753f3c2ad74efd70e5432d37b27c
parent    87aee5fda1d42f5e6fdbce3c5c91f28e291147cd (diff)
Add minimalistic unit tests for NVCC support
-rw-r--r--  Eigen/Core                 |   9
-rw-r--r--  cmake/EigenTesting.cmake   |  25
-rw-r--r--  test/CMakeLists.txt        |  22
-rw-r--r--  test/cuda_basic.cu         | 116
-rw-r--r--  test/cuda_common.h         |  98
-rw-r--r--  test/main.h                |   8
6 files changed, 272 insertions, 6 deletions
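
The new harness follows one pattern throughout: the same EIGEN_DEVICE_FUNC functor is run element-wise on the host and inside a CUDA kernel, and the two result buffers are compared. A minimal sketch of that pattern (names are illustrative; the real helpers are introduced in test/cuda_common.h below):

    // Illustrative sketch of the test pattern introduced by this commit.
    struct scale_by_two {
      EIGEN_DEVICE_FUNC
      void operator()(int i, const float* in, float* out) const {
        out[i] = 2.f * in[i];                 // identical code path on host and device
      }
    };
    // The harness then does, roughly:
    //   run_on_cpu (scale_by_two(), n, in_ref,  out_ref);
    //   run_on_cuda(scale_by_two(), n, in_cuda, out_cuda);
    //   VERIFY_IS_APPROX(out_ref, out_cuda);
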
diff --git a/Eigen/Core b/Eigen/Core
index d0f0adbe4..4c9c3d297 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -17,7 +17,14 @@
// Handle NVCC/CUDA
#ifdef __CUDACC__
// Do not try asserts on CUDA!
+ #ifndef EIGEN_NO_DEBUG
#define EIGEN_NO_DEBUG
+ #endif
+
+ #ifdef EIGEN_INTERNAL_DEBUGGING
+ #undef EIGEN_INTERNAL_DEBUGGING
+ #endif
+
// Do not try to vectorize on CUDA!
#define EIGEN_DONT_VECTORIZE
@@ -190,7 +197,7 @@
#include <intrin.h>
#endif
-#if defined(_CPPUNWIND) || defined(__EXCEPTIONS)
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
#define EIGEN_EXCEPTIONS
#endif
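
The second hunk keeps EIGEN_EXCEPTIONS undefined during device compilation passes, since CUDA device code cannot throw. A sketch of the kind of guarded code this makes safe (illustration only, not taken from Eigen):

    #include <math.h>
    #include <stdexcept>

    __host__ __device__ inline float checked_sqrt(float x)
    {
    #ifdef EIGEN_EXCEPTIONS            // with this patch, implies !defined(__CUDA_ARCH__)
      if (x < 0.f) throw std::runtime_error("negative input");   // host pass only
    #endif
      return ::sqrtf(x);               // sqrtf is available on both host and device
    }
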
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index 73a62fd73..fdc166bf8 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -11,9 +11,20 @@ endmacro(ei_add_property)
#internal. See documentation of ei_add_test for details.
macro(ei_add_test_internal testname testname_with_suffix)
set(targetname ${testname_with_suffix})
-
- set(filename ${testname}.cpp)
- add_executable(${targetname} ${filename})
+
+ if(EIGEN_ADD_TEST_FILENAME_EXTENSION)
+ set(filename ${testname}.${EIGEN_ADD_TEST_FILENAME_EXTENSION})
+ else()
+ set(filename ${testname}.cpp)
+ endif()
+
+ if(EIGEN_ADD_TEST_FILENAME_EXTENSION STREQUAL cu)
+ cuda_add_executable(${targetname} ${filename})
+ else()
+ add_executable(${targetname} ${filename})
+ endif()
+
+
if (targetname MATCHES "^eigen2_")
add_dependencies(eigen2_buildtests ${targetname})
else()
@@ -127,7 +138,13 @@ macro(ei_add_test testname)
set(EIGEN_TESTS_LIST "${EIGEN_TESTS_LIST}${testname}\n")
set_property(GLOBAL PROPERTY EIGEN_TESTS_LIST "${EIGEN_TESTS_LIST}")
- file(READ "${testname}.cpp" test_source)
+ if(EIGEN_ADD_TEST_FILENAME_EXTENSION)
+ set(filename ${testname}.${EIGEN_ADD_TEST_FILENAME_EXTENSION})
+ else()
+ set(filename ${testname}.cpp)
+ endif()
+
+ file(READ "${filename}" test_source)
set(parts 0)
string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
occurences "${test_source}")
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 547d7505c..5b9e92f01 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -286,3 +286,25 @@ option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OF
if(EIGEN_TEST_EIGEN2)
add_subdirectory(eigen2)
endif()
+
+
+# NVCC unit tests
+option(EIGEN_TEST_NVCC "Enable NVCC support in unit tests" OFF)
+if(EIGEN_TEST_NVCC)
+
+find_package(CUDA)
+if(CUDA_FOUND)
+
+ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
+ set(CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
+ cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})
+ set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
+
+ ei_add_test(cuda_basic)
+
+ unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
+
+endif(CUDA_FOUND)
+
+endif(EIGEN_TEST_NVCC)
+
diff --git a/test/cuda_basic.cu b/test/cuda_basic.cu
new file mode 100644
index 000000000..a062947a8
--- /dev/null
+++ b/test/cuda_basic.cu
@@ -0,0 +1,116 @@
+
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cuda_basic
+#include "main.h"
+#include "cuda_common.h"
+
+#include <Eigen/Eigenvalues>
+
+// struct Foo{
+// EIGEN_DEVICE_FUNC
+// void operator()(int i, const float* mats, float* vecs) const {
+// using namespace Eigen;
+// // Matrix3f M(data);
+// // Vector3f x(data+9);
+// // Map<Vector3f>(data+9) = M.inverse() * x;
+// Matrix3f M(mats+i/16);
+// Vector3f x(vecs+i*3);
+// // using std::min;
+// // using std::sqrt;
+// Map<Vector3f>(vecs+i*3) << x.minCoeff(), 1, 2;// / x.dot(x);//(M.inverse() * x) / x.x();
+// //x = x*2 + x.y() * x + x * x.maxCoeff() - x / x.sum();
+// }
+// };
+
+template<typename T>
+struct coeff_wise {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ T x1(in+i);
+ T x2(in+i+1);
+ T x3(in+i+2);
+ Map<T> res(out+i*T::MaxSizeAtCompileTime);
+
+ res.array() += (in[0] * x1 + x2).array() * x3.array();
+ }
+};
+
+template<typename T>
+struct redux {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ int N = 6;
+ T x1(in+i);
+ out[i*N+0] = x1.minCoeff();
+ out[i*N+1] = x1.maxCoeff();
+ out[i*N+2] = x1.sum();
+ out[i*N+3] = x1.prod();
+// out[i*N+4] = x1.colwise().sum().maxCoeff();
+// out[i*N+5] = x1.rowwise().maxCoeff().sum();
+ }
+};
+
+template<typename T1, typename T2>
+struct prod {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const
+ {
+ using namespace Eigen;
+ typedef Matrix<typename T1::Scalar, T1::RowsAtCompileTime, T2::ColsAtCompileTime> T3;
+ T1 x1(in+i);
+ T2 x2(in+i+1);
+ Map<T3> res(out+i*T3::MaxSizeAtCompileTime);
+ res += in[i] * x1 * x2;
+ }
+};
+
+
+template<typename T>
+struct eigenvalues {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec;
+ T M(in+i);
+ Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime);
+ T A = M*M.adjoint();
+ SelfAdjointEigenSolver<T> eig;
+ eig.computeDirect(A);
+ res = A.eigenvalues();
+ }
+};
+
+
+void test_cuda_basic()
+{
+ ei_test_init_cuda();
+
+ int nthreads = 100;
+ Eigen::VectorXf in, out;
+
+ #ifndef __CUDA_ARCH__
+ int data_size = nthreads * 16;
+ in.setRandom(data_size);
+ out.setRandom(data_size);
+ #endif
+
+ CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Vector3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Array44f>(), nthreads, in, out) );
+
+ CALL_SUBTEST( run_and_compare_to_cuda(redux<Array4f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(redux<Matrix3f>(), nthreads, in, out) );
+
+ CALL_SUBTEST( run_and_compare_to_cuda(prod<Matrix3f,Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(prod<Matrix4f,Vector4f>(), nthreads, in, out) );
+
+// CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix3f>(), nthreads, in, out) );
+// CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix2f>(), nthreads, in, out) );
+
+}
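
Adding coverage later only requires another functor in the same style; a hypothetical example (not part of this commit) combining a coefficient-wise product with a reduction, registered like the calls above:

    template<typename T>
    struct dot_with_self {
      EIGEN_DEVICE_FUNC
      void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
      {
        using namespace Eigen;
        T x1(in+i);
        out[i] = (x1.array() * x1.array()).sum();  // cwise product followed by a redux
      }
    };
    //   CALL_SUBTEST( run_and_compare_to_cuda(dot_with_self<Vector3f>(), nthreads, in, out) );
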
diff --git a/test/cuda_common.h b/test/cuda_common.h
new file mode 100644
index 000000000..0dd870b9c
--- /dev/null
+++ b/test/cuda_common.h
@@ -0,0 +1,98 @@
+
+#ifndef EIGEN_TEST_CUDA_COMMON_H
+#define EIGEN_TEST_CUDA_COMMON_H
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <cuda_runtime_api.h>
+#include <iostream>
+
+#ifndef __CUDACC__
+dim3 threadIdx, blockDim, blockIdx;
+#endif
+
+template<typename Kernel, typename Input, typename Output>
+void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out)
+{
+ for(int i=0; i<n; i++)
+ ker(i, in.data(), out.data());
+}
+
+
+template<typename Kernel, typename Input, typename Output>
+__global__
+void run_on_cuda_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
+{
+ int i = threadIdx.x + blockIdx.x*blockDim.x;
+ if(i<n) {
+ ker(i, in, out);
+ }
+}
+
+
+template<typename Kernel, typename Input, typename Output>
+void run_on_cuda(const Kernel& ker, int n, const Input& in, Output& out)
+{
+ typename Input::Scalar* d_in;
+ typename Output::Scalar* d_out;
+ std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar);
+ std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);
+
+ cudaMalloc((void**)(&d_in), in_bytes);
+ cudaMalloc((void**)(&d_out), out_bytes);
+
+ cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
+ cudaMemcpy(d_out, out.data(), out_bytes, cudaMemcpyHostToDevice);
+
+ // Simple and non-optimal 1D mapping assuming n is not too large
+ // That's only for unit testing!
+ dim3 Blocks(128);
+ dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) );
+
+ cudaThreadSynchronize();
+ run_on_cuda_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out);
+ cudaThreadSynchronize();
+
+ // check inputs have not been modified
+ cudaMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, cudaMemcpyDeviceToHost);
+ cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);
+
+ cudaFree(d_in);
+ cudaFree(d_out);
+}
+
+
+template<typename Kernel, typename Input, typename Output>
+void run_and_compare_to_cuda(const Kernel& ker, int n, const Input& in, Output& out)
+{
+ Input in_ref, in_cuda;
+ Output out_ref, out_cuda;
+ #ifndef __CUDA_ARCH__
+ in_ref = in_cuda = in;
+ out_ref = out_cuda = out;
+ #endif
+ run_on_cpu (ker, n, in_ref, out_ref);
+ run_on_cuda(ker, n, in_cuda, out_cuda);
+ #ifndef __CUDA_ARCH__
+ VERIFY_IS_APPROX(in_ref, in_cuda);
+ VERIFY_IS_APPROX(out_ref, out_cuda);
+ #endif
+}
+
+
+void ei_test_init_cuda()
+{
+ int device = 0;
+ cudaDeviceProp deviceProp;
+ cudaGetDeviceProperties(&deviceProp, device);
+ std::cout << "CUDA device info:\n";
+ std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
+ std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
+ std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n";
+ std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n";
+ std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n";
+ std::cout << " clockRate: " << deviceProp.clockRate << "\n";
+ std::cout << " computeMode: " << deviceProp.computeMode << "\n";
+}
+
+#endif // EIGEN_TEST_CUDA_COMMON_H
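
Note that run_on_cuda() does not check whether the kernel launch succeeded. A small helper along these lines could report launch and execution errors (a sketch, not part of this commit; it relies only on the standard CUDA runtime API and the <iostream> already included above):

    inline void check_cuda_error(const char* what)
    {
      cudaError_t err = cudaGetLastError();                   // launch-time errors
      if (err == cudaSuccess) err = cudaDeviceSynchronize();  // execution-time errors
      if (err != cudaSuccess)
        std::cerr << what << ": " << cudaGetErrorString(err) << std::endl;
    }
    // e.g. right after the <<<Grids,Blocks>>> launch in run_on_cuda():
    //   check_cuda_error("run_on_cuda_meta_kernel");
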
diff --git a/test/main.h b/test/main.h
index 14f0d2f78..c889fa6c5 100644
--- a/test/main.h
+++ b/test/main.h
@@ -138,7 +138,7 @@ namespace Eigen
Eigen::internal::push_assert = false; \
}
- #else // EIGEN_DEBUG_ASSERTS
+ #elif !defined(__CUDACC__) // EIGEN_DEBUG_ASSERTS
// see bug 89. The copy_bool here is working around a bug in gcc <= 4.3
#define eigen_assert(a) \
if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\
@@ -162,7 +162,9 @@ namespace Eigen
#endif // EIGEN_DEBUG_ASSERTS
+ #if !defined(__CUDACC__)
#define EIGEN_USE_CUSTOM_ASSERT
+ #endif
#else // EIGEN_NO_ASSERTION_CHECKING
@@ -238,6 +240,7 @@ inline bool test_isMuchSmallerThan(const double& a, const double& b)
inline bool test_isApproxOrLessThan(const double& a, const double& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<double>()); }
+#ifndef EIGEN_TEST_NO_COMPLEX
inline bool test_isApprox(const std::complex<float>& a, const std::complex<float>& b)
{ return internal::isApprox(a, b, test_precision<std::complex<float> >()); }
inline bool test_isMuchSmallerThan(const std::complex<float>& a, const std::complex<float>& b)
@@ -247,7 +250,9 @@ inline bool test_isApprox(const std::complex<double>& a, const std::complex<doub
{ return internal::isApprox(a, b, test_precision<std::complex<double> >()); }
inline bool test_isMuchSmallerThan(const std::complex<double>& a, const std::complex<double>& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<std::complex<double> >()); }
+#endif
+#ifndef EIGEN_TEST_NO_LONGDOUBLE
inline bool test_isApprox(const long double& a, const long double& b)
{
bool ret = internal::isApprox(a, b, test_precision<long double>());
@@ -261,6 +266,7 @@ inline bool test_isMuchSmallerThan(const long double& a, const long double& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<long double>()); }
inline bool test_isApproxOrLessThan(const long double& a, const long double& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<long double>()); }
+#endif // EIGEN_TEST_NO_LONGDOUBLE
template<typename Type1, typename Type2>
inline bool test_isApprox(const Type1& a, const Type2& b)