aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorGravatar Benoit Jacob <jacob.benoit.1@gmail.com>2009-01-08 15:20:21 +0000
committerGravatar Benoit Jacob <jacob.benoit.1@gmail.com>2009-01-08 15:20:21 +0000
commit1d52bd4cad64d8d8662f40c11210b705351b43ab (patch)
treec8fe83368e8e13ba7e8ec0c4ef078f5293cfea4d
parente2d2a7d2226b85569a9360b9738c15498fb454ac (diff)
the big memory changes. the most important changes are:
ei_aligned_malloc now really behaves like a malloc (untyped, doesn't call ctor); ei_aligned_new is the typed variant calling ctor; EIGEN_MAKE_ALIGNED_OPERATOR_NEW now takes the class name as parameter
-rw-r--r--Eigen/src/Core/CacheFriendlyProduct.h8
-rw-r--r--Eigen/src/Core/Matrix.h2
-rw-r--r--Eigen/src/Core/MatrixStorage.h24
-rw-r--r--Eigen/src/Core/Product.h16
-rw-r--r--Eigen/src/Core/util/Memory.h242
-rw-r--r--Eigen/src/Geometry/AlignedBox.h2
-rw-r--r--Eigen/src/Geometry/Hyperplane.h2
-rw-r--r--Eigen/src/Geometry/ParametrizedLine.h2
-rw-r--r--Eigen/src/Geometry/Quaternion.h2
-rw-r--r--Eigen/src/Geometry/Scaling.h2
-rw-r--r--Eigen/src/Geometry/Transform.h6
-rw-r--r--Eigen/src/Geometry/Translation.h2
-rw-r--r--Eigen/src/Sparse/SparseLDLT.h16
-rw-r--r--bench/benchVecAdd.cpp6
-rwxr-xr-xbench/btl/libs/hand_vec/hand_vec_interface.hh8
-rw-r--r--doc/InsideEigenExample.dox4
-rw-r--r--doc/UnalignedArrayAssert.dox29
-rw-r--r--test/dynalloc.cpp39
-rw-r--r--test/map.cpp16
-rw-r--r--test/nomalloc.cpp21
-rw-r--r--test/unalignedassert.cpp9
21 files changed, 215 insertions, 243 deletions
diff --git a/Eigen/src/Core/CacheFriendlyProduct.h b/Eigen/src/Core/CacheFriendlyProduct.h
index f23bdc9ea..b37dc8eb2 100644
--- a/Eigen/src/Core/CacheFriendlyProduct.h
+++ b/Eigen/src/Core/CacheFriendlyProduct.h
@@ -95,9 +95,9 @@ static void ei_cache_friendly_product(
const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (size_t(rhs)%16!=0));
Scalar* EIGEN_RESTRICT block = 0;
const int allocBlockSize = l2BlockRows*size;
- block = ei_aligned_stack_alloc(Scalar, allocBlockSize);
+ block = ei_aligned_stack_new(Scalar, allocBlockSize);
Scalar* EIGEN_RESTRICT rhsCopy
- = ei_aligned_stack_alloc(Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
+ = ei_aligned_stack_new(Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
// loops on each L2 cache friendly blocks of the result
for(int l2i=0; l2i<rows; l2i+=l2BlockRows)
@@ -338,8 +338,8 @@ static void ei_cache_friendly_product(
}
}
- ei_aligned_stack_free(block, Scalar, allocBlockSize);
- ei_aligned_stack_free(rhsCopy, Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
+ ei_aligned_stack_delete(Scalar, block, allocBlockSize);
+ ei_aligned_stack_delete(Scalar, rhsCopy, l2BlockSizeAligned*l2BlockSizeAligned);
}
#endif // EIGEN_EXTERN_INSTANTIATIONS
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
index d16cae031..37693acea 100644
--- a/Eigen/src/Core/Matrix.h
+++ b/Eigen/src/Core/Matrix.h
@@ -135,7 +135,7 @@ class Matrix
public:
enum { NeedsToAlign = (Options&AutoAlign) == AutoAlign
&& SizeAtCompileTime!=Dynamic && ((sizeof(Scalar)*SizeAtCompileTime)%16)==0 };
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Matrix,NeedsToAlign)
EIGEN_STRONG_INLINE int rows() const { return m_storage.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_storage.cols(); }
diff --git a/Eigen/src/Core/MatrixStorage.h b/Eigen/src/Core/MatrixStorage.h
index 98f948aae..307a1047c 100644
--- a/Eigen/src/Core/MatrixStorage.h
+++ b/Eigen/src/Core/MatrixStorage.h
@@ -147,8 +147,8 @@ template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic,
public:
inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {}
inline ei_matrix_storage(int size, int rows, int cols)
- : m_data(ei_aligned_malloc<T>(size)), m_rows(rows), m_cols(cols) {}
- inline ~ei_matrix_storage() { ei_aligned_free(m_data, m_rows*m_cols); }
+ : m_data(ei_aligned_new<T>(size)), m_rows(rows), m_cols(cols) {}
+ inline ~ei_matrix_storage() { ei_aligned_delete(m_data, m_rows*m_cols); }
inline void swap(ei_matrix_storage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline int rows(void) const {return m_rows;}
@@ -157,8 +157,8 @@ template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic,
{
if(size != m_rows*m_cols)
{
- ei_aligned_free(m_data, m_rows*m_cols);
- m_data = ei_aligned_malloc<T>(size);
+ ei_aligned_delete(m_data, m_rows*m_cols);
+ m_data = ei_aligned_new<T>(size);
}
m_rows = rows;
m_cols = cols;
@@ -174,8 +174,8 @@ template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic
int m_cols;
public:
inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {}
- inline ei_matrix_storage(int size, int, int cols) : m_data(ei_aligned_malloc<T>(size)), m_cols(cols) {}
- inline ~ei_matrix_storage() { ei_aligned_free(m_data, _Rows*m_cols); }
+ inline ei_matrix_storage(int size, int, int cols) : m_data(ei_aligned_new<T>(size)), m_cols(cols) {}
+ inline ~ei_matrix_storage() { ei_aligned_delete(m_data, _Rows*m_cols); }
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
inline static int rows(void) {return _Rows;}
inline int cols(void) const {return m_cols;}
@@ -183,8 +183,8 @@ template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic
{
if(size != _Rows*m_cols)
{
- ei_aligned_free(m_data, _Rows*m_cols);
- m_data = ei_aligned_malloc<T>(size);
+ ei_aligned_delete(m_data, _Rows*m_cols);
+ m_data = ei_aligned_new<T>(size);
}
m_cols = cols;
}
@@ -199,8 +199,8 @@ template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic
int m_rows;
public:
inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {}
- inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_malloc<T>(size)), m_rows(rows) {}
- inline ~ei_matrix_storage() { ei_aligned_free(m_data, _Cols*m_rows); }
+ inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_new<T>(size)), m_rows(rows) {}
+ inline ~ei_matrix_storage() { ei_aligned_delete(m_data, _Cols*m_rows); }
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline int rows(void) const {return m_rows;}
inline static int cols(void) {return _Cols;}
@@ -208,8 +208,8 @@ template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic
{
if(size != m_rows*_Cols)
{
- ei_aligned_free(m_data, _Cols*m_rows);
- m_data = ei_aligned_malloc<T>(size);
+ ei_aligned_delete(m_data, _Cols*m_rows);
+ m_data = ei_aligned_new<T>(size);
}
m_rows = rows;
}
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 0e4aa6c20..2fd9d5875 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -573,7 +573,7 @@ struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirect
_res = &res.coeffRef(0);
else
{
- _res = ei_aligned_stack_alloc(Scalar,res.size());
+ _res = ei_aligned_stack_new(Scalar,res.size());
Map<Matrix<Scalar,DestDerived::RowsAtCompileTime,1> >(_res, res.size()) = res;
}
ei_cache_friendly_product_colmajor_times_vector(res.size(),
@@ -583,7 +583,7 @@ struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirect
if (!EvalToRes)
{
res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
- ei_aligned_stack_free(_res, Scalar, res.size());
+ ei_aligned_stack_delete(Scalar, _res, res.size());
}
}
};
@@ -619,7 +619,7 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCo
_res = &res.coeffRef(0);
else
{
- _res = ei_aligned_stack_alloc(Scalar, res.size());
+ _res = ei_aligned_stack_new(Scalar, res.size());
Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size()) = res;
}
ei_cache_friendly_product_colmajor_times_vector(res.size(),
@@ -629,7 +629,7 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCo
if (!EvalToRes)
{
res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
- ei_aligned_stack_free(_res, Scalar, res.size());
+ ei_aligned_stack_delete(Scalar, _res, res.size());
}
}
};
@@ -652,13 +652,13 @@ struct ei_cache_friendly_product_selector<ProductType,LhsRows,RowMajor,HasDirect
_rhs = &product.rhs().const_cast_derived().coeffRef(0);
else
{
- _rhs = ei_aligned_stack_alloc(Scalar, product.rhs().size());
+ _rhs = ei_aligned_stack_new(Scalar, product.rhs().size());
Map<Matrix<Scalar,Rhs::SizeAtCompileTime,1> >(_rhs, product.rhs().size()) = product.rhs();
}
ei_cache_friendly_product_rowmajor_times_vector(&product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
_rhs, product.rhs().size(), res);
- if (!UseRhsDirectly) ei_aligned_stack_free(_rhs, Scalar, product.rhs().size());
+ if (!UseRhsDirectly) ei_aligned_stack_delete(Scalar, _rhs, product.rhs().size());
}
};
@@ -680,13 +680,13 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCo
_lhs = &product.lhs().const_cast_derived().coeffRef(0);
else
{
- _lhs = ei_aligned_stack_alloc(Scalar, product.lhs().size());
+ _lhs = ei_aligned_stack_new(Scalar, product.lhs().size());
Map<Matrix<Scalar,Lhs::SizeAtCompileTime,1> >(_lhs, product.lhs().size()) = product.lhs();
}
ei_cache_friendly_product_rowmajor_times_vector(&product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
_lhs, product.lhs().size(), res);
- if(!UseLhsDirectly) ei_aligned_stack_free(_lhs, Scalar, product.lhs().size());
+ if(!UseLhsDirectly) ei_aligned_stack_delete(Scalar, _lhs, product.lhs().size());
}
};
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index f31304d97..ef4c194fa 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -31,82 +31,125 @@
extern "C" int posix_memalign (void **, size_t, size_t) throw ();
#endif
-struct ei_byte_forcing_aligned_malloc
-{
- unsigned char c; // sizeof must be 1.
-};
-template<typename T> struct ei_force_aligned_malloc { enum { ret = 0 }; };
-template<> struct ei_force_aligned_malloc<ei_byte_forcing_aligned_malloc> { enum { ret = 1 }; };
-
-/** \internal allocates \a size * sizeof(\a T) bytes. If vectorization is enabled and T is such that a packet
- * containts more than one T, then the returned pointer is guaranteed to have 16 bytes alignment.
+/** \internal allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
* On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown.
*/
-template<typename T>
-inline T* ei_aligned_malloc(size_t size)
+inline void* ei_aligned_malloc(size_t size)
{
- if(ei_packet_traits<T>::size>1 || ei_force_aligned_malloc<T>::ret)
- {
- void *void_result;
- #ifdef __linux
- #ifdef EIGEN_EXCEPTIONS
- const int failed =
- #endif
- posix_memalign(&void_result, 16, size*sizeof(T));
+ #ifdef EIGEN_NO_MALLOC
+ ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
+ #endif
+
+ void *result;
+ #ifdef __linux
+ #ifdef EIGEN_EXCEPTIONS
+ const int failed =
+ #endif
+ posix_memalign(&result, 16, size);
+ #else
+ #ifdef _MSC_VER
+ result = _aligned_malloc(size, 16);
+ #elif defined(__APPLE__)
+ result = malloc(size); // Apple's malloc() already returns 16-byte-aligned ptrs
#else
- #ifdef _MSC_VER
- void_result = _aligned_malloc(size*sizeof(T), 16);
- #elif defined(__APPLE__)
- void_result = malloc(size*sizeof(T)); // Apple's malloc() already returns aligned ptrs
- #else
- void_result = _mm_malloc(size*sizeof(T), 16);
- #endif
- #ifdef EIGEN_EXCEPTIONS
- const int failed = (void_result == 0);
- #endif
+ result = _mm_malloc(size, 16);
#endif
#ifdef EIGEN_EXCEPTIONS
- if(failed)
- throw std::bad_alloc();
+ const int failed = (result == 0);
#endif
- // if the user uses Eigen on some fancy scalar type such as multiple-precision numbers,
- // and this type has a custom operator new, then we want to honor this operator new!
- // so when we use C functions to allocate memory, we must be careful to call manually the constructor using
- // the special placement-new syntax.
- return ::new(void_result) T[size];
- }
- else
- return new T[size]; // here we really want a new, not a malloc. Justification: if the user uses Eigen on
- // some fancy scalar type such as multiple-precision numbers, and this type has a custom operator new,
- // then we want to honor this operator new! Anyway this type won't have vectorization so the vectorizing path
- // is irrelevant here. Yes, we should say somewhere in the docs that if the user uses a custom scalar type then
- // he can't have both vectorization and a custom operator new on his scalar type.
+ #endif
+ #ifdef EIGEN_EXCEPTIONS
+ if(failed)
+ throw std::bad_alloc();
+ #endif
+ return result;
+}
+
+/** allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
+ * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown.
+ */
+template<bool Align> inline void* ei_conditional_aligned_malloc(size_t size)
+{
+ return ei_aligned_malloc(size);
+}
+
+template<> inline void* ei_conditional_aligned_malloc<false>(size_t size)
+{
+ void *void_result = malloc(size);
+ #ifdef EIGEN_EXCEPTIONS
+ if(!void_result) throw std::bad_alloc();
+ #endif
+ return void_result;
+}
+
+/** allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
+ * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown.
+ * The default constructor of T is called.
+ */
+template<typename T> T* ei_aligned_new(size_t size)
+{
+ void *void_result = ei_aligned_malloc(sizeof(T)*size);
+ return ::new(void_result) T[size];
+}
+
+template<typename T, bool Align> T* ei_conditional_aligned_new(size_t size)
+{
+ void *void_result = ei_conditional_aligned_malloc<Align>(sizeof(T)*size);
+ return ::new(void_result) T[size];
}
/** \internal free memory allocated with ei_aligned_malloc
- * The \a size parameter is used to determine on how many elements to call the destructor. If you don't
- * want any destructor to be called, just pass 0.
*/
-template<typename T>
-inline void ei_aligned_free(T* ptr, size_t size)
+inline void ei_aligned_free(void *ptr)
+{
+ #if defined(__linux)
+ free(ptr);
+ #elif defined(__APPLE__)
+ free(ptr);
+ #elif defined(_MSC_VER)
+ _aligned_free(ptr);
+ #else
+ _mm_free(ptr);
+ #endif
+}
+
+/** \internal free memory allocated with ei_conditional_aligned_malloc
+ */
+template<bool Align> inline void ei_conditional_aligned_free(void *ptr)
+{
+ ei_aligned_free(ptr);
+}
+
+template<> void ei_conditional_aligned_free<false>(void *ptr)
+{
+ free(ptr);
+}
+
+/** \internal delete the elements of an array.
+ * The \a size parameter tells on how many objects to call the destructor of T.
+ */
+template<typename T> inline void ei_delete_elements_of_array(T *ptr, size_t size)
+{
+ // always destruct an array starting from the end.
+ while(size) ptr[--size].~T();
+}
+
+/** \internal delete objects constructed with ei_aligned_new
+ * The \a size parameter tells on how many objects to call the destructor of T.
+ */
+template<typename T> void ei_aligned_delete(T *ptr, size_t size)
{
- if (ei_packet_traits<T>::size>1 || ei_force_aligned_malloc<T>::ret)
- {
- // need to call manually the dtor in case T is some user-defined fancy numeric type.
- // always destruct an array starting from the end.
- while(size) ptr[--size].~T();
- #if defined(__linux)
- free(ptr);
- #elif defined(__APPLE__)
- free(ptr);
- #elif defined(_MSC_VER)
- _aligned_free(ptr);
- #else
- _mm_free(ptr);
- #endif
- }
- else
- delete[] ptr;
+ ei_delete_elements_of_array<T>(ptr, size);
+ ei_aligned_free(ptr);
+}
+
+/** \internal delete objects constructed with ei_conditional_aligned_new
+ * The \a size parameter tells on how many objects to call the destructor of T.
+ */
+template<typename T, bool Align> inline void ei_conditional_aligned_delete(T *ptr, size_t size)
+{
+ ei_delete_elements_of_array<T>(ptr, size);
+ ei_conditional_aligned_free<Align>(ptr);
}
/** \internal \returns the number of elements which have to be skipped such that data are 16 bytes aligned */
@@ -124,10 +167,10 @@ inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
}
/** \internal
- * ei_aligned_stack_alloc(TYPE,SIZE) allocates an aligned buffer of sizeof(TYPE)*SIZE bytes
- * on the stack if sizeof(TYPE)*SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT.
+ * ei_aligned_stack_alloc(SIZE) allocates an aligned buffer of SIZE bytes
+ * on the stack if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT.
* Otherwise the memory is allocated on the heap.
- * Data allocated with ei_aligned_stack_alloc \b must be freed by calling ei_aligned_stack_free(PTR,TYPE,SIZE).
+ * Data allocated with ei_aligned_stack_alloc \b must be freed by calling ei_aligned_stack_free(PTR,SIZE).
* \code
* float * data = ei_aligned_stack_alloc(float,array.size());
* // ...
@@ -135,45 +178,20 @@ inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
* \endcode
*/
#ifdef __linux__
- #define ei_aligned_stack_alloc(TYPE,SIZE) ((sizeof(TYPE)*(SIZE)>EIGEN_STACK_ALLOCATION_LIMIT) \
- ? ei_aligned_malloc<TYPE>(SIZE) \
- : (TYPE*)alloca(sizeof(TYPE)*(SIZE)))
- #define ei_aligned_stack_free(PTR,TYPE,SIZE) if (sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR,SIZE)
+ #define ei_aligned_stack_alloc(SIZE) (SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) \
+ ? alloca(SIZE) \
+ : ei_aligned_malloc(SIZE)
+ #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR)
#else
- #define ei_aligned_stack_alloc(TYPE,SIZE) ei_aligned_malloc<TYPE>(SIZE)
- #define ei_aligned_stack_free(PTR,TYPE,SIZE) ei_aligned_free(PTR,SIZE)
+ #define ei_aligned_stack_alloc(SIZE) ei_aligned_malloc(SIZE)
+ #define ei_aligned_stack_free(PTR,SIZE) ei_aligned_free(PTR)
#endif
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF__INTERNAL(NeedsToAlign, TYPENAME) \
- typedef TYPENAME Eigen::ei_meta_if<(NeedsToAlign), \
- Eigen::ei_byte_forcing_aligned_malloc, \
- char \
- >::ret Eigen_ByteAlignedAsNeeded; \
- void *operator new(size_t size) throw() { \
- return Eigen::ei_aligned_malloc<Eigen_ByteAlignedAsNeeded>(size); \
- } \
- void *operator new(size_t, void *ptr) throw() { \
- return ptr; \
- } \
- void *operator new[](size_t size) throw() { \
- return Eigen::ei_aligned_malloc<Eigen_ByteAlignedAsNeeded>(size); \
- } \
- void *operator new[](size_t, void *ptr) throw() { \
- return ptr; \
- } \
- void operator delete(void * ptr) { Eigen::ei_aligned_free(static_cast<Eigen_ByteAlignedAsNeeded *>(ptr), 0); } \
- void operator delete[](void * ptr) { Eigen::ei_aligned_free(static_cast<Eigen_ByteAlignedAsNeeded *>(ptr), 0); }
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW \
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF__INTERNAL(true, )
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)\
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF__INTERNAL(NeedsToAlign, typename)
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Type,Size)\
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(((Size)!=Eigen::Dynamic) && ((sizeof(Type)*(Size))%16==0))
-
+#define ei_aligned_stack_new(TYPE,SIZE) ::new(ei_aligned_stack_alloc(sizeof(TYPE)*SIZE)) TYPE[SIZE]
+#define ei_aligned_stack_delete(TYPE,PTR,SIZE) ei_delete_elements_of_array<TYPE>(PTR, SIZE); \
+ ei_aligned_stack_free(PTR,sizeof(TYPE)*SIZE)
-/** \class WithAlignedOperatorNew
- *
- * \brief Enforces instances of inherited classes to be 16 bytes aligned when allocated with operator new
+/** \brief Overloads the operator new and delete of the class Type with operators that are aligned if NeedsToAlign is true
*
* When Eigen's explicit vectorization is enabled, Eigen assumes that some fixed sizes types are aligned
* on a 16 bytes boundary. Those include all Matrix types having a sizeof multiple of 16 bytes, e.g.:
@@ -200,7 +218,8 @@ inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
* overloading the operator new to return aligned data when the vectorization is enabled.
* Here is a similar safe example:
* \code
- * struct Foo : public WithAlignedOperatorNew {
+ * struct Foo {
+ * EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Foo)
* char dummy;
* Vector4f some_vector;
* };
@@ -210,9 +229,24 @@ inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
*
* \sa class ei_new_allocator
*/
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Type,NeedsToAlign) \
+ void *operator new(size_t size) throw() { \
+ return Eigen::ei_conditional_aligned_malloc<NeedsToAlign>(size); \
+ } \
+ void *operator new[](size_t size) throw() { \
+ return Eigen::ei_conditional_aligned_malloc<NeedsToAlign>(size); \
+ } \
+ void operator delete(void * ptr) { Eigen::ei_aligned_free(ptr); } \
+ void operator delete[](void * ptr) { Eigen::ei_aligned_free(ptr); }
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Type) EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Type,true)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Type,Scalar,Size) \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Type,((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0))
+
+/** Deprecated, use the EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Class) macro instead in your own class */
struct WithAlignedOperatorNew
{
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(WithAlignedOperatorNew)
};
/** \class ei_new_allocator
diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h
index e0f820765..c33ae1ae8 100644
--- a/Eigen/src/Geometry/AlignedBox.h
+++ b/Eigen/src/Geometry/AlignedBox.h
@@ -41,7 +41,7 @@ template <typename _Scalar, int _AmbientDim>
class AlignedBox
{
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(AlignedBox,_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h
index a3425f6cb..1fbb94622 100644
--- a/Eigen/src/Geometry/Hyperplane.h
+++ b/Eigen/src/Geometry/Hyperplane.h
@@ -47,7 +47,7 @@ template <typename _Scalar, int _AmbientDim>
class Hyperplane
{
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Hyperplane,_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
diff --git a/Eigen/src/Geometry/ParametrizedLine.h b/Eigen/src/Geometry/ParametrizedLine.h
index da30c8e82..e40d19bcc 100644
--- a/Eigen/src/Geometry/ParametrizedLine.h
+++ b/Eigen/src/Geometry/ParametrizedLine.h
@@ -43,7 +43,7 @@ template <typename _Scalar, int _AmbientDim>
class ParametrizedLine
{
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,_AmbientDim)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(ParametrizedLine,_Scalar,_AmbientDim)
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index f595adcb9..86f343291 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -65,7 +65,7 @@ class Quaternion : public RotationBase<Quaternion<_Scalar>,3>
Coefficients m_coeffs;
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,4)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Quaternion,_Scalar,4)
using Base::operator*;
diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h
index d46296707..0ee5d38b0 100644
--- a/Eigen/src/Geometry/Scaling.h
+++ b/Eigen/src/Geometry/Scaling.h
@@ -43,7 +43,7 @@ template<typename _Scalar, int _Dim>
class Scaling
{
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,_Dim)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Scaling,_Scalar,_Dim)
/** dimension of the space */
enum { Dim = _Dim };
/** the scalar type of the coefficients */
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index fc7f5b002..2f1b9a75c 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -63,7 +63,7 @@ template<typename _Scalar, int _Dim>
class Transform
{
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Transform,_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
enum {
Dim = _Dim, ///< space dimension in which the transformation holds
HDim = _Dim+1 ///< size of a respective homogeneous vector
@@ -95,7 +95,9 @@ public:
inline Transform() { }
inline Transform(const Transform& other)
- { m_matrix = other.m_matrix; }
+ {
+ m_matrix = other.m_matrix;
+ }
inline explicit Transform(const TranslationType& t) { *this = t; }
inline explicit Transform(const ScalingType& s) { *this = s; }
diff --git a/Eigen/src/Geometry/Translation.h b/Eigen/src/Geometry/Translation.h
index ab8ce3899..9a51830d5 100644
--- a/Eigen/src/Geometry/Translation.h
+++ b/Eigen/src/Geometry/Translation.h
@@ -43,7 +43,7 @@ template<typename _Scalar, int _Dim>
class Translation
{
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(_Scalar,_Dim)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE(Translation,_Scalar,_Dim)
/** dimension of the space */
enum { Dim = _Dim };
/** the scalar type of the coefficients */
diff --git a/Eigen/src/Sparse/SparseLDLT.h b/Eigen/src/Sparse/SparseLDLT.h
index 2441646be..a1bac4d08 100644
--- a/Eigen/src/Sparse/SparseLDLT.h
+++ b/Eigen/src/Sparse/SparseLDLT.h
@@ -192,7 +192,7 @@ void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
m_matrix.resize(size, size);
m_parent.resize(size);
m_nonZerosPerCol.resize(size);
- int * tags = ei_aligned_stack_alloc(int, size);
+ int * tags = ei_aligned_stack_new(int, size);
const int* Ap = a._outerIndexPtr();
const int* Ai = a._innerIndexPtr();
@@ -238,7 +238,7 @@ void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k];
m_matrix.resizeNonZeros(Lp[size]);
- ei_aligned_stack_free(tags, int, size);
+ ei_aligned_stack_delete(int, tags, size);
}
template<typename MatrixType, int Backend>
@@ -257,9 +257,9 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
Scalar* Lx = m_matrix._valuePtr();
m_diag.resize(size);
- Scalar * y = ei_aligned_stack_alloc(Scalar, size);
- int * pattern = ei_aligned_stack_alloc(int, size);
- int * tags = ei_aligned_stack_alloc(int, size);
+ Scalar * y = ei_aligned_stack_new(Scalar, size);
+ int * pattern = ei_aligned_stack_new(int, size);
+ int * tags = ei_aligned_stack_new(int, size);
const int* P = 0;
const int* Pinv = 0;
@@ -315,9 +315,9 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
}
}
- ei_aligned_stack_free(y, Scalar, size);
- ei_aligned_stack_free(pattern, int, size);
- ei_aligned_stack_free(tags, int, size);
+ ei_aligned_stack_delete(Scalar, y, size);
+ ei_aligned_stack_delete(int, pattern, size);
+ ei_aligned_stack_delete(int, tags, size);
return ok; /* success, diagonal of D is all nonzero */
}
diff --git a/bench/benchVecAdd.cpp b/bench/benchVecAdd.cpp
index ef85b3ced..396ab6a63 100644
--- a/bench/benchVecAdd.cpp
+++ b/bench/benchVecAdd.cpp
@@ -21,9 +21,9 @@ int main(int argc, char* argv[])
{
int size = SIZE * 8;
int size2 = size * size;
- Scalar* a = ei_aligned_malloc<Scalar>(size2);
- Scalar* b = ei_aligned_malloc<Scalar>(size2+4)+1;
- Scalar* c = ei_aligned_malloc<Scalar>(size2);
+ Scalar* a = ei_aligned_new<Scalar>(size2);
+ Scalar* b = ei_aligned_new<Scalar>(size2+4)+1;
+ Scalar* c = ei_aligned_new<Scalar>(size2);
for (int i=0; i<size; ++i)
{
diff --git a/bench/btl/libs/hand_vec/hand_vec_interface.hh b/bench/btl/libs/hand_vec/hand_vec_interface.hh
index 592be82d5..4e7d549ce 100755
--- a/bench/btl/libs/hand_vec/hand_vec_interface.hh
+++ b/bench/btl/libs/hand_vec/hand_vec_interface.hh
@@ -38,16 +38,16 @@ public :
typedef typename f77_interface_base<real>::gene_vector gene_vector;
static void free_matrix(gene_matrix & A, int N){
- ei_aligned_free(A, 0);
+ ei_aligned_delete(A);
}
static void free_vector(gene_vector & B){
- ei_aligned_free(B, 0);
+ ei_aligned_delete(B);
}
static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
int N = A_stl.size();
- A = ei_aligned_malloc<real>(N*N);
+ A = ei_aligned_new<real>(N*N);
for (int j=0;j<N;j++)
for (int i=0;i<N;i++)
A[i+N*j] = A_stl[j][i];
@@ -55,7 +55,7 @@ public :
static inline void vector_from_stl(gene_vector & B, stl_vector & B_stl){
int N = B_stl.size();
- B = ei_aligned_malloc<real>(N);
+ B = ei_aligned_new<real>(N);
for (int i=0;i<N;i++)
B[i] = B_stl[i];
}
diff --git a/doc/InsideEigenExample.dox b/doc/InsideEigenExample.dox
index 22635120b..3242acf4a 100644
--- a/doc/InsideEigenExample.dox
+++ b/doc/InsideEigenExample.dox
@@ -101,10 +101,10 @@ with size=50, rows=50, columns=1.
Here is this constructor:
\code
-inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_malloc<T>(size)), m_rows(rows) {}
+inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_new<T>(size)), m_rows(rows) {}
\endcode
-Here, the \a m_data member is the actual array of coefficients of the matrix. As you see, it is dynamically allocated. Rather than calling new[] or malloc(), as you can see, we have our own ei_aligned_malloc defined in src/Core/util/Memory.h. What it does is that if vectorization is enabled, then it uses a platform-specific call to allocate a 128-bit-aligned array, as that is very useful for vectorization with both SSE2 and AltiVec. If vectorization is disabled, it amounts to the standard new[].
+Here, the \a m_data member is the actual array of coefficients of the matrix. As you see, it is dynamically allocated. Rather than calling new[] or malloc(), as you can see, we have our own ei_aligned_new defined in src/Core/util/Memory.h. What it does is that if vectorization is enabled, then it uses a platform-specific call to allocate a 128-bit-aligned array, as that is very useful for vectorization with both SSE2 and AltiVec. If vectorization is disabled, it amounts to the standard new[].
As you can see, the constructor also sets the \a m_rows member to \a size. Notice that there is no \a m_columns member: indeed, in this partial specialization of ei_matrix_storage, we know the number of columns at compile-time, since the _Cols template parameter is different from Dynamic. Namely, in our case, _Cols is 1, which is to say that our vector is just a matrix with 1 column. Hence, there is no need to store the number of columns as a runtime variable.
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index 8fdd74fae..8a001dfa3 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -10,7 +10,7 @@ namespace Eigen {
- \ref stillstillstuck
- \ref movetotop
- \ref bugineigen
- - \ref nomacro
+ - \ref conditional
<hr>
\section what What kind of code made this assertion fail?
@@ -54,7 +54,7 @@ class Foo
Eigen::Vector2d v;
...
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Foo)
};
...
@@ -144,7 +144,7 @@ class Foo
double x;
Eigen::Vector2d v;
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Foo)
};
\endcode
@@ -156,7 +156,7 @@ class Foo
Eigen::Vector2d v;
double x;
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Foo)
};
\endcode
@@ -168,25 +168,6 @@ Dynamic-size matrices and vectors, such as Eigen::VectorXd, allocate dynamically
No, it's not our bug. It's more like an inherent problem of the C++ language -- though it must be said that any other existing language probably has the same problem. The problem is that there is no way that you can specify an aligned "operator new" that would propagate to classes having you as member data.
-\section nomacro I don't like macros! Any solution with inheritance?
-
-Yes, you can let your class Foo publicly inherit Eigen::WithAlignedOperatorNew, like this:
-
-\code
-class Foo : public Eigen::WithAlignedOperatorNew
-{
- ...
- Eigen::Vector2d v;
- ...
-};
-
-...
-
-Foo *foo = new Foo;
-\endcode
-
-This solution gives the same result as the macro. It has the disadvantage that if Foo already had a base class, you are now doing multiple inheritance, and this situation is sometimes handled wrongly by certain compilers -- we've been having trouble with MSVC. The solution with the macro is therefore safer.
-
\section conditional What if I want to do this conditionnally (depending on template parameters) ?
For this situation, we offer the macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign). It will generate aligned operators like EIGEN_MAKE_ALIGNED_OPERATOR_NEW if NeedsToAlign is true. It will generate operators with the default alignment if NeedsToAlign is false.
@@ -202,7 +183,7 @@ template<int n> class Foo
Vector v;
...
public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Foo,NeedsToAlign)
};
...
diff --git a/test/dynalloc.cpp b/test/dynalloc.cpp
index 3b0d703ae..d829008d5 100644
--- a/test/dynalloc.cpp
+++ b/test/dynalloc.cpp
@@ -25,38 +25,17 @@
#include "main.h"
// test compilation with both a struct and a class...
-struct MyStruct : WithAlignedOperatorNew
+struct MyStruct
{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(MyStruct)
char dummychar;
Vector4f avec;
};
-class MyClassA : public WithAlignedOperatorNew
-{
- public:
- char dummychar;
- Vector4f avec;
-};
-
-// ..as well as with some other base classes
-
-class MyBaseClass
-{
- public:
- char dummychar;
- float afloat;
-};
-
-class MyClassB : public WithAlignedOperatorNew, public MyBaseClass
-{
- public:
- char dummychar;
- Vector4f avec;
-};
-
-class MyClassC : public MyBaseClass, public WithAlignedOperatorNew
+class MyClassA
{
public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(MyClassA)
char dummychar;
Vector4f avec;
};
@@ -85,8 +64,6 @@ void test_dynalloc()
{
MyStruct foo0; VERIFY(size_t(foo0.avec.data())%16==0);
MyClassA fooA; VERIFY(size_t(fooA.avec.data())%16==0);
- MyClassB fooB; VERIFY(size_t(fooB.avec.data())%16==0);
- MyClassC fooC; VERIFY(size_t(fooC.avec.data())%16==0);
}
// dynamic allocation, single object
@@ -94,12 +71,8 @@ void test_dynalloc()
{
MyStruct *foo0 = new MyStruct(); VERIFY(size_t(foo0->avec.data())%16==0);
MyClassA *fooA = new MyClassA(); VERIFY(size_t(fooA->avec.data())%16==0);
- MyClassB *fooB = new MyClassB(); VERIFY(size_t(fooB->avec.data())%16==0);
- MyClassC *fooC = new MyClassC(); VERIFY(size_t(fooC->avec.data())%16==0);
delete foo0;
delete fooA;
- delete fooB;
- delete fooC;
}
// dynamic allocation, array
@@ -108,12 +81,8 @@ void test_dynalloc()
{
MyStruct *foo0 = new MyStruct[N]; VERIFY(size_t(foo0->avec.data())%16==0);
MyClassA *fooA = new MyClassA[N]; VERIFY(size_t(fooA->avec.data())%16==0);
- MyClassB *fooB = new MyClassB[N]; VERIFY(size_t(fooB->avec.data())%16==0);
- MyClassC *fooC = new MyClassC[N]; VERIFY(size_t(fooC->avec.data())%16==0);
delete[] foo0;
delete[] fooA;
- delete[] fooB;
- delete[] fooC;
}
// std::vector
diff --git a/test/map.cpp b/test/map.cpp
index 83953b698..5159dffa1 100644
--- a/test/map.cpp
+++ b/test/map.cpp
@@ -31,8 +31,8 @@ template<typename VectorType> void map_class(const VectorType& m)
int size = m.size();
// test Map.h
- Scalar* array1 = ei_aligned_malloc<Scalar>(size);
- Scalar* array2 = ei_aligned_malloc<Scalar>(size);
+ Scalar* array1 = ei_aligned_new<Scalar>(size);
+ Scalar* array2 = ei_aligned_new<Scalar>(size);
Scalar* array3 = new Scalar[size+1];
Scalar* array3unaligned = size_t(array3)%16 == 0 ? array3+1 : array3;
@@ -45,8 +45,8 @@ template<typename VectorType> void map_class(const VectorType& m)
VERIFY_IS_APPROX(ma1, ma2);
VERIFY_IS_APPROX(ma1, ma3);
- ei_aligned_free(array1, size);
- ei_aligned_free(array2, size);
+ ei_aligned_delete(array1, size);
+ ei_aligned_delete(array2, size);
delete[] array3;
}
@@ -57,8 +57,8 @@ template<typename VectorType> void map_static_methods(const VectorType& m)
int size = m.size();
// test Map.h
- Scalar* array1 = ei_aligned_malloc<Scalar>(size);
- Scalar* array2 = ei_aligned_malloc<Scalar>(size);
+ Scalar* array1 = ei_aligned_new<Scalar>(size);
+ Scalar* array2 = ei_aligned_new<Scalar>(size);
Scalar* array3 = new Scalar[size+1];
Scalar* array3unaligned = size_t(array3)%16 == 0 ? array3+1 : array3;
@@ -71,8 +71,8 @@ template<typename VectorType> void map_static_methods(const VectorType& m)
VERIFY_IS_APPROX(ma1, ma2);
VERIFY_IS_APPROX(ma1, ma3);
- ei_aligned_free(array1, size);
- ei_aligned_free(array2, size);
+ ei_aligned_delete(array1, size);
+ ei_aligned_delete(array2, size);
delete[] array3;
}
diff --git a/test/nomalloc.cpp b/test/nomalloc.cpp
index be4c35c48..82119e9b3 100644
--- a/test/nomalloc.cpp
+++ b/test/nomalloc.cpp
@@ -25,28 +25,13 @@
// this hack is needed to make this file compiles with -pedantic (gcc)
#define throw(X)
-// discard vectorization since the global operator new is not called in that case
-#define EIGEN_DONT_VECTORIZE 1
-// discard stack allocation as that too bypasses the global operator new
+// discard stack allocation as that too bypasses malloc
#define EIGEN_STACK_ALLOCATION_LIMIT 0
+// any heap allocation will raise an assert
+#define EIGEN_NO_MALLOC
#include "main.h"
-void* operator new[] (size_t n)
-{
- ei_assert(false && "operator new should never be called with fixed size path");
- // the following is in case assertion are disabled
- std::cerr << "operator new should never be called with fixed size path" << std::endl;
- exit(2);
- void* p = malloc(n);
- return p;
-}
-
-void operator delete[](void* p) throw()
-{
- free(p);
-}
-
template<typename MatrixType> void nomalloc(const MatrixType& m)
{
/* this test check no dynamic memory allocation are issued with fixed-size matrices
diff --git a/test/unalignedassert.cpp b/test/unalignedassert.cpp
index 7b6e3535d..8e0486e92 100644
--- a/test/unalignedassert.cpp
+++ b/test/unalignedassert.cpp
@@ -55,15 +55,16 @@ struct Bad6
Matrix<double, 3, 4> m; // bad: same reason
};
-struct Good7 : Eigen::WithAlignedOperatorNew
+struct Good7
{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Good7)
Vector2d m;
float f; // make the struct have sizeof%16!=0 to make it a little more tricky when we allow an array of 2 such objects
};
struct Good8
{
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW(Good8)
float f; // try the f at first -- the EIGEN_ALIGN_128 attribute of m should make that still work
Matrix4f m;
};
@@ -76,7 +77,7 @@ struct Good9
template<bool Align> struct Depends
{
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Align)
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Depends,Align)
Vector2d m;
float f;
};
@@ -97,7 +98,7 @@ void check_unalignedassert_bad()
float buf[sizeof(T)+16];
float *unaligned = buf;
while((reinterpret_cast<size_t>(unaligned)&0xf)==0) ++unaligned; // make sure unaligned is really unaligned
- T *x = new(static_cast<void*>(unaligned)) T;
+ T *x = ::new(static_cast<void*>(unaligned)) T;
x->~T();
}