author    Benoit Steiner <benoit.steiner.goog@gmail.com> 2016-10-12 13:46:29 -0700
committer Benoit Steiner <benoit.steiner.goog@gmail.com> 2016-10-12 13:46:29 -0700
commit    78d2926508857f0885779b0997462de5ec378142 (patch)
tree      a8febe2f3c8ef0946fa762bc91288a724af7d95b
parent    2e2f48e30e24b0b6113f052caa5bb817625d8081 (diff)
parent    f939c351cbfcb1007943fe6062503bc455b692e1 (diff)
Merged eigen/eigen into default
-rw-r--r--              Eigen/src/Core/arch/AVX/MathFunctions.h               35
-rw-r--r--              Eigen/src/Core/arch/CUDA/PacketMathHalf.h             24
-rw-r--r--              Eigen/src/Core/arch/NEON/Complex.h                    15
-rw-r--r--              Eigen/src/Core/arch/SSE/MathFunctions.h               28
-rw-r--r--              Eigen/src/Core/util/Macros.h                           4
-rwxr-xr-x [-rw-r--r--] Eigen/src/Geometry/Scaling.h                          12
-rw-r--r--              Eigen/src/QR/ColPivHouseholderQR.h                     6
-rw-r--r--              Eigen/src/QR/FullPivHouseholderQR.h                    6
-rw-r--r--              Eigen/src/SPQRSupport/SuiteSparseQRSupport.h           7
-rwxr-xr-x [-rw-r--r--] test/geo_transformations.cpp                           0
-rw-r--r--              test/packetmath.cpp                                   31
-rw-r--r--              test/qr_colpivoting.cpp                               12
-rw-r--r--              test/qr_fullpivoting.cpp                              12
-rw-r--r--              test/spqr_support.cpp                                  4
-rw-r--r--              unsupported/Eigen/AlignedVector3                       2
-rw-r--r--              unsupported/Eigen/CXX11/Tensor                         2
-rw-r--r--              unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h  12
-rw-r--r--              unsupported/test/cxx11_tensor_argmax_cuda.cu          14
-rw-r--r--              unsupported/test/cxx11_tensor_complex_cuda.cu         38
19 files changed, 185 insertions, 79 deletions
diff --git a/Eigen/src/Core/arch/AVX/MathFunctions.h b/Eigen/src/Core/arch/AVX/MathFunctions.h
index d21ec39dd..6af67ce2d 100644
--- a/Eigen/src/Core/arch/AVX/MathFunctions.h
+++ b/Eigen/src/Core/arch/AVX/MathFunctions.h
@@ -355,30 +355,27 @@ pexp<Packet4d>(const Packet4d& _x) {
// Functions for sqrt.
// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
// of Newton's method, at a cost of 1-2 bits of precision as opposed to the
-// exact solution. The main advantage of this approach is not just speed, but
-// also the fact that it can be inlined and pipelined with other computations,
-// further reducing its effective latency.
+// exact solution. It does not handle +inf, or denormalized numbers correctly.
+// The main advantage of this approach is not just speed, but also the fact that
+// it can be inlined and pipelined with other computations, further reducing its
+// effective latency. This is similar to Quake3's fast inverse square root.
+// For detail see here: http://www.beyond3d.com/content/articles/8/
#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
psqrt<Packet8f>(const Packet8f& _x) {
- _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
- _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);
-
- Packet8f neg_half = pmul(_x, p8f_minus_half);
-
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- Packet8f non_zero_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_GE_OQ);
- Packet8f x = _mm256_and_ps(non_zero_mask, _mm256_rsqrt_ps(_x));
-
+ Packet8f half = pmul(_x, pset1<Packet8f>(.5f));
+ Packet8f denormal_mask = _mm256_and_ps(
+ _mm256_cmp_ps(_x, pset1<Packet8f>((std::numeric_limits<float>::min)()),
+ _CMP_LT_OQ),
+ _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_GE_OQ));
+
+ // Compute approximate reciprocal sqrt.
+ Packet8f x = _mm256_rsqrt_ps(_x);
// Do a single step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8f_one_point_five));
-
- // Multiply the original _x by it's reciprocal square root to extract the
- // square root.
- return pmul(_x, x);
+ x = pmul(x, psub(pset1<Packet8f>(1.5f), pmul(half, pmul(x,x))));
+ // Flush results for denormals to zero.
+ return _mm256_andnot_ps(denormal_mask, pmul(_x,x));
}
#else
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
diff --git a/Eigen/src/Core/arch/CUDA/PacketMathHalf.h b/Eigen/src/Core/arch/CUDA/PacketMathHalf.h
index f0bc36d9a..4f8958c38 100644
--- a/Eigen/src/Core/arch/CUDA/PacketMathHalf.h
+++ b/Eigen/src/Core/arch/CUDA/PacketMathHalf.h
@@ -865,6 +865,30 @@ template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half*
to[stride*7].x = aux[7].x;
}
+template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux_max<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux_min<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux_mul<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,8>& kernel) {
__m128i a = kernel.packet[0].x;
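The four `predux` specializations above all follow the same pattern: widen the eight halfs to a `Packet8f`, reduce in float, and narrow the scalar result back to `Eigen::half`. A hedged usage sketch of this internal API (assumes a build where `Packet8h` is available, e.g. AVX with F16C):

```cpp
#include <Eigen/Core>

// Hypothetical use of the new half reductions via Eigen's internal packet API.
Eigen::half sum_eight_halves() {
  using namespace Eigen;
  internal::Packet8h p = internal::pset1<internal::Packet8h>(half(1.5f));
  return internal::predux(p);  // widened to float, reduced, narrowed: 12.0
}
```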
diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h
index 3e121dce5..57e9b431f 100644
--- a/Eigen/src/Core/arch/NEON/Complex.h
+++ b/Eigen/src/Core/arch/NEON/Complex.h
@@ -16,8 +16,14 @@ namespace Eigen {
namespace internal {
inline uint32x4_t p4ui_CONJ_XOR() {
+// See bug 1325, clang fails to call vld1q_u64.
+#if EIGEN_COMP_CLANG
+ uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ return ret;
+#else
static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
return vld1q_u32( conj_XOR_DATA );
+#endif
}
inline uint32x2_t p2ui_CONJ_XOR() {
@@ -282,8 +288,13 @@ ptranspose(PacketBlock<Packet2cf,2>& kernel) {
//---------- double ----------
#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
-const uint64_t p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 };
-static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );
+// See bug 1325, clang fails to call vld1q_u64.
+#if EIGEN_COMP_CLANG
+ static uint64x2_t p2ul_CONJ_XOR = {0x0, 0x8000000000000000};
+#else
+ const uint64_t p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 };
+ static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );
+#endif
struct Packet1cd
{
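Both branches build the same sign-bit mask; only the initialization differs. A self-contained sketch of the workaround (assumes an ARM NEON target; the helper name `conj_mask` is illustrative):

```cpp
#include <arm_neon.h>
#include <stdint.h>

// Hypothetical helper mirroring the p2ul_CONJ_XOR setup above.
uint64x2_t conj_mask() {
#if defined(__clang__)
  // Eigen bug 1325: clang fails on vld1q_u64, so use a vector literal.
  uint64x2_t m = { 0x0, 0x8000000000000000 };
  return m;
#else
  static const uint64_t data[] = { 0x0, 0x8000000000000000 };
  return vld1q_u64(data);
#endif
}
```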
diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h
index ac2fd8103..7b5f948e1 100644
--- a/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -32,7 +32,7 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
/* the smallest non denormalized float number */
_EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
_EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f);
-
+
/* natural logarithm computed for 4 simultaneous float
return NaN for x <= 0
*/
@@ -444,25 +444,33 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
#if EIGEN_FAST_MATH
-// This is based on Quake3's fast inverse square root.
+// Functions for sqrt.
+// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
+// of Newton's method, at a cost of 1-2 bits of precision as opposed to the
+// exact solution. It does not handle +inf, or denormalized numbers correctly.
+// The main advantage of this approach is not just speed, but also the fact that
+// it can be inlined and pipelined with other computations, further reducing its
+// effective latency. This is similar to Quake3's fast inverse square root.
// For detail see here: http://www.beyond3d.com/content/articles/8/
-// It lacks 1 (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly.
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& _x)
{
Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
+ Packet4f denormal_mask = _mm_and_ps(
+ _mm_cmpge_ps(_x, _mm_setzero_ps()),
+ _mm_cmplt_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)())));
- /* select only the inverse sqrt of non-zero inputs */
- Packet4f non_zero_mask = _mm_cmpge_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)()));
- Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
-
+ // Compute approximate reciprocal sqrt.
+ Packet4f x = _mm_rsqrt_ps(_x);
+ // Do a single step of Newton's iteration.
x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
- return pmul(_x,x);
+ // Flush results for denormals to zero.
+ return _mm_andnot_ps(denormal_mask, pmul(_x,x));
}
#else
-template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
#endif
@@ -491,7 +499,7 @@ Packet4f prsqrt<Packet4f>(const Packet4f& _x) {
Packet4f neg_mask = _mm_cmplt_ps(_x, _mm_setzero_ps());
Packet4f zero_mask = _mm_andnot_ps(neg_mask, le_zero_mask);
Packet4f infs_and_nans = _mm_or_ps(_mm_and_ps(neg_mask, p4f_nan),
- _mm_and_ps(zero_mask, p4f_inf));
+ _mm_and_ps(zero_mask, p4f_inf));
// Do a single step of Newton's iteration.
x = pmul(x, pmadd(neg_half, pmul(x, x), p4f_one_point_five));
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index 9069d8e6b..108504333 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -392,8 +392,8 @@
// Does the compiler support variadic templates?
#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
#if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \
- && ( !defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 )
- // ^^ Disable the use of variadic templates when compiling with nvcc on ARM devices:
+ && ( !defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000) )
+ // ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
// this prevents nvcc from crashing when compiling Eigen on Tegra X1
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
#else
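For context on the new guard: to the best of my knowledge, `__CUDACC_VER__` encodes nvcc's version as major*10000 + minor*100 + build, so 80000 corresponds to nvcc 8.0. A minimal sketch of the same gate, inverted to show when the feature stays off:

```cpp
#if defined(__NVCC__) && EIGEN_ARCH_ARM_OR_ARM64 \
    && !(defined(__CUDACC_VER__) && __CUDACC_VER__ >= 80000)
  // nvcc < 8.0 on ARM: variadic templates remain disabled (Tegra X1 crash)
#endif
```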
diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h
index 3e12681b0..f58ca03d9 100644..100755
--- a/Eigen/src/Geometry/Scaling.h
+++ b/Eigen/src/Geometry/Scaling.h
@@ -118,28 +118,28 @@ operator*(const MatrixBase<Derived>& matrix, const UniformScaling<Scalar>& s)
{ return matrix.derived() * s.factor(); }
/** Constructs a uniform scaling from scale factor \a s */
-static inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
+inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
/** Constructs a uniform scaling from scale factor \a s */
-static inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
+inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
/** Constructs a uniform scaling from scale factor \a s */
template<typename RealScalar>
-static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
+inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
{ return UniformScaling<std::complex<RealScalar> >(s); }
/** Constructs a 2D axis aligned scaling */
template<typename Scalar>
-static inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
+inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
{ return DiagonalMatrix<Scalar,2>(sx, sy); }
/** Constructs a 3D axis aligned scaling */
template<typename Scalar>
-static inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
/** Constructs an axis aligned scaling expression from vector expression \a coeffs
* This is an alias for coeffs.asDiagonal()
*/
template<typename Derived>
-static inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
+inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
{ return coeffs.asDiagonal(); }
/** \deprecated */
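Dropping `static` gives these header-defined free functions the usual `inline` linkage; usage is unchanged. A minimal usage sketch following the Eigen geometry tutorial idiom:

```cpp
#include <Eigen/Geometry>

// Compose a translation with a uniform scaling into an affine transform.
Eigen::Affine3d make_transform() {
  return Eigen::Translation3d(1.0, 0.0, 0.0) * Eigen::Scaling(2.0);
}
```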
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index 9650781d6..618f80480 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -613,12 +613,12 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &
namespace internal {
-template<typename DstXprType, typename MatrixType, typename Scalar>
-struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense>
+template<typename DstXprType, typename MatrixType>
+struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename ColPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>
{
typedef ColPivHouseholderQR<MatrixType> QrType;
typedef Inverse<QrType> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
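This specialization is what runs when `qr.inverse()` is assigned to a dense destination; parameterizing the `assign_op` on the destination and QR scalar types (instead of a single `Scalar`) lets the two differ. A usage sketch mirroring the new qr_colpivoting test:

```cpp
#include <Eigen/QR>

// inverse() is evaluated through the Assignment specialization above,
// i.e. as solve(Identity).
Eigen::MatrixXd invert(const Eigen::MatrixXd& m) {
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(m);
  return qr.inverse();  // meaningful when qr.isInvertible()
}
```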
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index e0e15100d..e489bddc2 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -575,12 +575,12 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType
namespace internal {
-template<typename DstXprType, typename MatrixType, typename Scalar>
-struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<Scalar,Scalar>, Dense2Dense>
+template<typename DstXprType, typename MatrixType>
+struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>
{
typedef FullPivHouseholderQR<MatrixType> QrType;
typedef Inverse<QrType> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index d9c3113e7..953d57c9d 100644
--- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -119,16 +119,16 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
max2Norm = RealScalar(1);
pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
}
-
cholmod_sparse A;
A = viewAsCholmod(mat);
+ m_rows = matrix.rows();
Index col = matrix.cols();
m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
&m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
if (!m_cR)
{
- m_info = NumericalIssue;
+ m_info = NumericalIssue;
m_isInitialized = false;
return;
}
@@ -139,7 +139,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
/**
* Get the number of rows of the input matrix and the Q matrix
*/
- inline Index rows() const {return m_cR->nrow; }
+ inline Index rows() const {return m_rows; }
/**
* Get the number of columns of the input matrix.
@@ -245,6 +245,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
mutable Index m_rank; // The rank of the matrix
mutable cholmod_common m_cc; // Workspace and parameters
bool m_useDefaultThreshold; // Use default threshold
+ Index m_rows;
template<typename ,typename > friend struct SPQR_QProduct;
};
diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp
index 278e527c2..278e527c2 100644..100755
--- a/test/geo_transformations.cpp
+++ b/test/geo_transformations.cpp
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index 32467deda..d6854c8c3 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -201,7 +201,7 @@ template<typename Scalar> void packetmath()
internal::pstore(data2+3*PacketSize, A3);
VERIFY(areApprox(ref, data2, 4*PacketSize) && "internal::pbroadcast4");
}
-
+
{
for (int i=0; i<PacketSize*2; ++i)
ref[i] = data1[i/PacketSize];
@@ -211,9 +211,9 @@ template<typename Scalar> void packetmath()
internal::pstore(data2+1*PacketSize, A1);
VERIFY(areApprox(ref, data2, 2*PacketSize) && "internal::pbroadcast2");
}
-
+
VERIFY(internal::isApprox(data1[0], internal::pfirst(internal::pload<Packet>(data1))) && "internal::pfirst");
-
+
if(PacketSize>1)
{
for(int offset=0;offset<4;++offset)
@@ -323,7 +323,7 @@ template<typename Scalar> void packetmath_real()
CHECK_CWISE1_IF(PacketTraits::HasRound, numext::round, internal::pround);
CHECK_CWISE1_IF(PacketTraits::HasCeil, numext::ceil, internal::pceil);
CHECK_CWISE1_IF(PacketTraits::HasFloor, numext::floor, internal::pfloor);
-
+
for (int i=0; i<size; ++i)
{
data1[i] = internal::random<Scalar>(-1,1);
@@ -448,12 +448,9 @@ template<typename Scalar> void packetmath_real()
data1[0] = Scalar(-1.0f);
h.store(data2, internal::plog(h.load(data1)));
VERIFY((numext::isnan)(data2[0]));
-#if !EIGEN_FAST_MATH
h.store(data2, internal::psqrt(h.load(data1)));
VERIFY((numext::isnan)(data2[0]));
VERIFY((numext::isnan)(data2[1]));
-#endif
-
}
}
@@ -467,7 +464,7 @@ template<typename Scalar> void packetmath_notcomplex()
EIGEN_ALIGN_MAX Scalar data1[PacketTraits::size*4];
EIGEN_ALIGN_MAX Scalar data2[PacketTraits::size*4];
EIGEN_ALIGN_MAX Scalar ref[PacketTraits::size*4];
-
+
Array<Scalar,Dynamic,1>::Map(data1, PacketTraits::size*4).setRandom();
ref[0] = data1[0];
@@ -486,7 +483,7 @@ template<typename Scalar> void packetmath_notcomplex()
for (int i=0; i<PacketSize; ++i)
ref[0] = (std::max)(ref[0],data1[i]);
VERIFY(internal::isApprox(ref[0], internal::predux_max(internal::pload<Packet>(data1))) && "internal::predux_max");
-
+
for (int i=0; i<PacketSize; ++i)
ref[i] = data1[0]+Scalar(i);
internal::pstore(data2, internal::plset<Packet>(data1[0]));
@@ -498,12 +495,12 @@ template<typename Scalar,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar
typedef internal::packet_traits<Scalar> PacketTraits;
typedef typename PacketTraits::type Packet;
const int PacketSize = PacketTraits::size;
-
+
internal::conj_if<ConjLhs> cj0;
internal::conj_if<ConjRhs> cj1;
internal::conj_helper<Scalar,Scalar,ConjLhs,ConjRhs> cj;
internal::conj_helper<Packet,Packet,ConjLhs,ConjRhs> pcj;
-
+
for(int i=0;i<PacketSize;++i)
{
ref[i] = cj0(data1[i]) * cj1(data2[i]);
@@ -511,7 +508,7 @@ template<typename Scalar,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar
}
internal::pstore(pval,pcj.pmul(internal::pload<Packet>(data1),internal::pload<Packet>(data2)));
VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmul");
-
+
for(int i=0;i<PacketSize;++i)
{
Scalar tmp = ref[i];
@@ -539,12 +536,12 @@ template<typename Scalar> void packetmath_complex()
data1[i] = internal::random<Scalar>() * Scalar(1e2);
data2[i] = internal::random<Scalar>() * Scalar(1e2);
}
-
+
test_conj_helper<Scalar,false,false> (data1,data2,ref,pval);
test_conj_helper<Scalar,false,true> (data1,data2,ref,pval);
test_conj_helper<Scalar,true,false> (data1,data2,ref,pval);
test_conj_helper<Scalar,true,true> (data1,data2,ref,pval);
-
+
{
for(int i=0;i<PacketSize;++i)
ref[i] = Scalar(std::imag(data1[i]),std::real(data1[i]));
@@ -564,9 +561,9 @@ template<typename Scalar> void packetmath_scatter_gather()
for (int i=0; i<PacketSize; ++i) {
data1[i] = internal::random<Scalar>()/RealScalar(PacketSize);
}
-
+
int stride = internal::random<int>(1,20);
-
+
EIGEN_ALIGN_MAX Scalar buffer[PacketSize*20];
memset(buffer, 0, 20*sizeof(Packet));
Packet packet = internal::pload<Packet>(data1);
@@ -602,7 +599,7 @@ void test_packetmath()
CALL_SUBTEST_1( packetmath_notcomplex<float>() );
CALL_SUBTEST_2( packetmath_notcomplex<double>() );
CALL_SUBTEST_3( packetmath_notcomplex<int>() );
-
+
CALL_SUBTEST_1( packetmath_real<float>() );
CALL_SUBTEST_2( packetmath_real<double>() );
diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp
index 057bb014c..26ed27f5c 100644
--- a/test/qr_colpivoting.cpp
+++ b/test/qr_colpivoting.cpp
@@ -141,6 +141,18 @@ template<typename MatrixType> void qr()
m2 = MatrixType::Random(cols,cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
+
+ {
+ Index size = rows;
+ do {
+ m1 = MatrixType::Random(size,size);
+ qr.compute(m1);
+ } while(!qr.isInvertible());
+ MatrixType m1_inv = qr.inverse();
+ m3 = m1 * MatrixType::Random(size,cols2);
+ m2 = qr.solve(m3);
+ VERIFY_IS_APPROX(m2, m1_inv*m3);
+ }
}
template<typename MatrixType, int Cols2> void qr_fixedsize()
diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp
index 05a705887..70e89c198 100644
--- a/test/qr_fullpivoting.cpp
+++ b/test/qr_fullpivoting.cpp
@@ -54,6 +54,18 @@ template<typename MatrixType> void qr()
m2 = MatrixType::Random(cols,cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
+
+ {
+ Index size = rows;
+ do {
+ m1 = MatrixType::Random(size,size);
+ qr.compute(m1);
+ } while(!qr.isInvertible());
+ MatrixType m1_inv = qr.inverse();
+ m3 = m1 * MatrixType::Random(size,cols2);
+ m2 = qr.solve(m3);
+ VERIFY_IS_APPROX(m2, m1_inv*m3);
+ }
}
template<typename MatrixType> void qr_invertible()
diff --git a/test/spqr_support.cpp b/test/spqr_support.cpp
index baa25a0c2..81e63b6a5 100644
--- a/test/spqr_support.cpp
+++ b/test/spqr_support.cpp
@@ -20,8 +20,8 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows
int cols = internal::random<int>(1,rows);
double density = (std::max)(8./(rows*cols), 0.01);
- A.resize(rows,rows);
- dA.resize(rows,rows);
+ A.resize(rows,cols);
+ dA.resize(rows,cols);
initSparse<Scalar>(density, dA, A,ForceNonZeroDiag);
A.makeCompressed();
return rows;
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 135eec572..47a86d4c0 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -61,7 +61,7 @@ template<typename _Scalar> class AlignedVector3
Scalar* data() { return m_coeffs.data(); }
const Scalar* data() const { return m_coeffs.data(); }
Index innerStride() const { return 1; }
- Index outerStride() const { return m_coeffs.outerStride(); }
+ Index outerStride() const { return 3; }
inline const Scalar& coeff(Index row, Index col) const
{ return m_coeffs.coeff(row, col); }
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
index 4976a1254..73aae3da8 100644
--- a/unsupported/Eigen/CXX11/Tensor
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -34,6 +34,8 @@
#include <cstring>
#ifdef _WIN32
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
index 7164e8d60..d73f6dc68 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
@@ -124,7 +124,8 @@ template <typename T> struct SumReducer
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
- return saccum + predux(vaccum);
+ internal::scalar_sum_op<T> sum_op;
+ return sum_op(saccum, predux(vaccum));
}
};
@@ -173,7 +174,8 @@ template <typename T> struct MeanReducer
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
- return (saccum + predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
+ internal::scalar_sum_op<T> sum_op;
+ return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
}
protected:
@@ -304,7 +306,8 @@ template <typename T> struct ProdReducer
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
- (*accum) *= t;
+ internal::scalar_product_op<T> prod_op;
+ (*accum) = prod_op(*accum, t);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
@@ -328,7 +331,8 @@ template <typename T> struct ProdReducer
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
- return saccum * predux_mul(vaccum);
+ internal::scalar_product_op<T> prod_op;
+ return prod_op(saccum, predux_mul(vaccum));
}
};
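The switch from raw `+`/`*` to `scalar_sum_op`/`scalar_product_op` routes the final reduction through Eigen's functor machinery, which can be specialized per scalar type (e.g. for `Eigen::half`) where a plain operator might not be available. A small sketch of the dispatch, assuming only that the functor is callable like the operator:

```cpp
#include <Eigen/Core>

// scalar_sum_op<T> behaves like operator+ but can be specialized per type.
Eigen::half add_halves(Eigen::half a, Eigen::half b) {
  Eigen::internal::scalar_sum_op<Eigen::half> sum_op;
  return sum_op(a, b);
}
```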
diff --git a/unsupported/test/cxx11_tensor_argmax_cuda.cu b/unsupported/test/cxx11_tensor_argmax_cuda.cu
index 6fe8982f2..653443dc5 100644
--- a/unsupported/test/cxx11_tensor_argmax_cuda.cu
+++ b/unsupported/test/cxx11_tensor_argmax_cuda.cu
@@ -116,10 +116,10 @@ void test_cuda_argmax_dim()
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
- VERIFY_IS_EQUAL(tensor_arg.dimensions().TotalSize(),
+ VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
- for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) {
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
@@ -144,7 +144,7 @@ void test_cuda_argmax_dim()
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
- for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) {
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
@@ -205,10 +205,10 @@ void test_cuda_argmin_dim()
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
- VERIFY_IS_EQUAL(tensor_arg.dimensions().TotalSize(),
- size_t(2*3*5*7 / tensor.dimension(dim)));
+ VERIFY_IS_EQUAL(tensor_arg.size(),
+ 2*3*5*7 / tensor.dimension(dim));
- for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) {
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
@@ -233,7 +233,7 @@ void test_cuda_argmin_dim()
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
- for (size_t n = 0; n < tensor_arg.dimensions().TotalSize(); ++n) {
+ for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
diff --git a/unsupported/test/cxx11_tensor_complex_cuda.cu b/unsupported/test/cxx11_tensor_complex_cuda.cu
index f895efd01..d4e111f5d 100644
--- a/unsupported/test/cxx11_tensor_complex_cuda.cu
+++ b/unsupported/test/cxx11_tensor_complex_cuda.cu
@@ -108,8 +108,46 @@ static void test_cuda_sum_reductions() {
}
+static void test_cuda_product_reductions() {
+
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+
+ const int num_rows = internal::random<int>(1024, 5*1024);
+ const int num_cols = internal::random<int>(1024, 5*1024);
+
+ Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+ in.setRandom();
+
+ Tensor<std::complex<float>, 0> full_redux;
+ full_redux = in.prod();
+
+ std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+ std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+ std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+ std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+ gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+ TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+ TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+ out_gpu.device(gpu_device) = in_gpu.prod();
+
+ Tensor<std::complex<float>, 0> full_redux_gpu;
+ gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+ gpu_device.synchronize();
+
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+ gpu_device.deallocate(gpu_in_ptr);
+ gpu_device.deallocate(gpu_out_ptr);
+}
+
+
void test_cxx11_tensor_complex()
{
CALL_SUBTEST(test_cuda_nullary());
CALL_SUBTEST(test_cuda_sum_reductions());
+ CALL_SUBTEST(test_cuda_product_reductions());
}