aboutsummaryrefslogtreecommitdiffhomepage
path: root/doc
diff options
context:
space:
mode:
authorGravatar Benoit Jacob <jacob.benoit.1@gmail.com>2010-10-25 10:15:22 -0400
committerGravatar Benoit Jacob <jacob.benoit.1@gmail.com>2010-10-25 10:15:22 -0400
commit4716040703be1ee906439385d20475dcddad5ce3 (patch)
tree8efd3cf3007d8360e66f38e2d280127cbb70daa6 /doc
parentca85a1f6c5fc33ac382aa2d7ba2da63d55d3223e (diff)
bug #86 : use internal:: namespace instead of ei_ prefix
Diffstat (limited to 'doc')
-rw-r--r--doc/D11_UnalignedArrayAssert.dox2
-rw-r--r--doc/I00_CustomizingEigen.dox36
-rw-r--r--doc/I03_InsideEigenExample.dox114
-rw-r--r--doc/I11_Aliasing.dox2
-rw-r--r--doc/I13_FunctionsTakingEigenTypes.dox8
-rw-r--r--doc/snippets/Tridiagonalization_decomposeInPlace.cpp2
6 files changed, 82 insertions, 82 deletions
diff --git a/doc/D11_UnalignedArrayAssert.dox b/doc/D11_UnalignedArrayAssert.dox
index 5b9ac5a2a..d173ee5f4 100644
--- a/doc/D11_UnalignedArrayAssert.dox
+++ b/doc/D11_UnalignedArrayAssert.dox
@@ -5,7 +5,7 @@ namespace Eigen {
Hello! You are seeing this webpage because your program terminated on an assertion failure like this one:
<pre>
my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
-Eigen::ei_matrix_array<T, Size, MatrixOptions, Align>::ei_matrix_array()
+Eigen::internal::matrix_array<T, Size, MatrixOptions, Align>::matrix_array()
[with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
Assertion `(reinterpret_cast<size_t>(array) & 0xf) == 0 && "this assertion
is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html
diff --git a/doc/I00_CustomizingEigen.dox b/doc/I00_CustomizingEigen.dox
index d87fb8d6a..1c7a45355 100644
--- a/doc/I00_CustomizingEigen.dox
+++ b/doc/I00_CustomizingEigen.dox
@@ -43,7 +43,7 @@ inline Scalar squaredDistanceTo(const MatrixBase<OtherDerived>& other) const
template<typename OtherDerived>
inline RealScalar distanceTo(const MatrixBase<OtherDerived>& other) const
-{ return ei_sqrt(derived().squaredDistanceTo(other)); }
+{ return internal::sqrt(derived().squaredDistanceTo(other)); }
inline void scaleTo(RealScalar l) { RealScalar vl = norm(); if (vl>1e-9) derived() *= (l/vl); }
@@ -58,13 +58,13 @@ void makeFloor(const MatrixBase<OtherDerived>& other) { derived() = derived().cw
template<typename OtherDerived>
void makeCeil(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMax(other.derived()); }
-const CwiseUnaryOp<ei_scalar_add_op<Scalar>, Derived>
+const CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>
operator+(const Scalar& scalar) const
-{ return CwiseUnaryOp<ei_scalar_add_op<Scalar>, Derived>(derived(), ei_scalar_add_op<Scalar>(scalar)); }
+{ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>(derived(), internal::scalar_add_op<Scalar>(scalar)); }
-friend const CwiseUnaryOp<ei_scalar_add_op<Scalar>, Derived>
+friend const CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>
operator+(const Scalar& scalar, const MatrixBase<Derived>& mat)
-{ return CwiseUnaryOp<ei_scalar_add_op<Scalar>, Derived>(mat.derived(), ei_scalar_add_op<Scalar>(scalar)); }
+{ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>(mat.derived(), internal::scalar_add_op<Scalar>(scalar)); }
\endcode
Then one can add the following declaration in the config.h or whatever prerequisites header file of his project:
@@ -124,7 +124,7 @@ By default, Eigen currently supports the following scalar types: \c int, \c floa
In order to add support for a custom type \c T you need:
1 - make sure the common operator (+,-,*,/,etc.) are supported by the type \c T
2 - add a specialization of struct Eigen::NumTraits<T> (see \ref NumTraits)
- 3 - define a couple of math functions for your type such as: ei_sqrt, ei_abs, etc...
+ 3 - define a couple of math functions for your type such as: internal::sqrt, internal::abs, etc...
(see the file Eigen/src/Core/MathFunctions.h)
Here is a concrete example adding support for the Adolc's \c adouble type. <a href="https://projects.coin-or.org/ADOL-C">Adolc</a> is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives.
@@ -158,21 +158,21 @@ template<> struct NumTraits<adtl::adouble>
}
// the Adolc's type adouble is defined in the adtl namespace
-// therefore, the following ei_* functions *must* be defined
+// therefore, the following functions *must* be defined
// in the same namespace
namespace adtl {
- inline const adouble& ei_conj(const adouble& x) { return x; }
- inline const adouble& ei_real(const adouble& x) { return x; }
- inline adouble ei_imag(const adouble&) { return 0.; }
- inline adouble ei_abs(const adouble& x) { return fabs(x); }
- inline adouble ei_abs2(const adouble& x) { return x*x; }
- inline adouble ei_sqrt(const adouble& x) { return sqrt(x); }
- inline adouble ei_exp(const adouble& x) { return exp(x); }
- inline adouble ei_log(const adouble& x) { return log(x); }
- inline adouble ei_sin(const adouble& x) { return sin(x); }
- inline adouble ei_cos(const adouble& x) { return cos(x); }
- inline adouble ei_pow(const adouble& x, adouble y) { return pow(x, y); }
+ inline const adouble& conj(const adouble& x) { return x; }
+ inline const adouble& real(const adouble& x) { return x; }
+ inline adouble imag(const adouble&) { return 0.; }
+ inline adouble abs(const adouble& x) { return fabs(x); }
+ inline adouble abs2(const adouble& x) { return x*x; }
+ // sqrt, exp, log, sin, cos and pow for adouble are already provided by adtl.
}
diff --git a/doc/I03_InsideEigenExample.dox b/doc/I03_InsideEigenExample.dox
index 9ee253133..0c60984a2 100644
--- a/doc/I03_InsideEigenExample.dox
+++ b/doc/I03_InsideEigenExample.dox
@@ -101,10 +101,10 @@ with size=50, rows=50, columns=1.
Here is this constructor:
\code
-inline DenseStorage(int size, int rows, int) : m_data(ei_aligned_new<T>(size)), m_rows(rows) {}
+inline DenseStorage(int size, int rows, int) : m_data(internal::aligned_new<T>(size)), m_rows(rows) {}
\endcode
-Here, the \a m_data member is the actual array of coefficients of the matrix. As you see, it is dynamically allocated. Rather than calling new[] or malloc(), as you can see, we have our own ei_aligned_new defined in src/Core/util/Memory.h. What it does is that if vectorization is enabled, then it uses a platform-specific call to allocate a 128-bit-aligned array, as that is very useful for vectorization with both SSE2 and AltiVec. If vectorization is disabled, it amounts to the standard new[].
+Here, the \a m_data member is the actual array of coefficients of the matrix. As you see, it is dynamically allocated. Rather than calling new[] or malloc(), as you can see, we have our own internal::aligned_new defined in src/Core/util/Memory.h. What it does is that if vectorization is enabled, then it uses a platform-specific call to allocate a 128-bit-aligned array, as that is very useful for vectorization with both SSE2 and AltiVec. If vectorization is disabled, it amounts to the standard new[].
As you can see, the constructor also sets the \a m_rows member to \a size. Notice that there is no \a m_columns member: indeed, in this partial specialization of DenseStorage, we know the number of columns at compile-time, since the _Cols template parameter is different from Dynamic. Namely, in our case, _Cols is 1, which is to say that our vector is just a matrix with 1 column. Hence, there is no need to store the number of columns as a runtime variable.
@@ -136,7 +136,7 @@ MatrixBase::operator+(const MatrixBase&)
The return type of this operator is
\code
-CwiseBinaryOp<ei_scalar_sum_op<float>, VectorXf, VectorXf>
+CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
\endcode
The CwiseBinaryOp class is our first encounter with an expression template. As we said, the operator+ doesn't by itself perform any computation, it just returns an abstract "sum of vectors" expression. Since there are also "difference of vectors" and "coefficient-wise product of vectors" expressions, we unify them all as "coefficient-wise binary operations", which we abbreviate as "CwiseBinaryOp". "Coefficient-wise" means that the operations is performed coefficient by coefficient. "binary" means that there are two operands -- we are adding two vectors with one another.
@@ -177,7 +177,7 @@ class MatrixBase
// ...
template<typename OtherDerived>
- const CwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
+ const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
operator+(const MatrixBase<OtherDerived> &other) const;
// ...
@@ -186,17 +186,17 @@ class MatrixBase
Here of course, \a Derived and \a OtherDerived are VectorXf.
-As we said, CwiseBinaryOp is also used for other operations such as substration, so it takes another template parameter determining the operation that will be applied to coefficients. This template parameter is a functor, that is, a class in which we have an operator() so it behaves like a function. Here, the functor used is ei_scalar_sum_op. It is defined in src/Core/Functors.h.
+As we said, CwiseBinaryOp is also used for other operations such as subtraction, so it takes another template parameter determining the operation that will be applied to coefficients. This template parameter is a functor, that is, a class in which we have an operator() so it behaves like a function. Here, the functor used is internal::scalar_sum_op. It is defined in src/Core/Functors.h.
-Let us now explain the ei_traits here. The ei_scalar_sum_op class takes one template parameter: the type of the numbers to handle. Here of course we want to pass the scalar type (a.k.a. numeric type) of VectorXf, which is \c float. How do we determine which is the scalar type of \a Derived ? Throughout Eigen, all matrix and expression types define a typedef \a Scalar which gives its scalar type. For example, VectorXf::Scalar is a typedef for \c float. So here, if life was easy, we could find the numeric type of \a Derived as just
+Let us now explain the internal::traits here. The internal::scalar_sum_op class takes one template parameter: the type of the numbers to handle. Here of course we want to pass the scalar type (a.k.a. numeric type) of VectorXf, which is \c float. How do we determine which is the scalar type of \a Derived ? Throughout Eigen, all matrix and expression types define a typedef \a Scalar which gives its scalar type. For example, VectorXf::Scalar is a typedef for \c float. So here, if life was easy, we could find the numeric type of \a Derived as just
\code
typename Derived::Scalar
\endcode
Unfortunately, we can't do that here, as the compiler would complain that the type Derived hasn't yet been defined. So we use a workaround: in src/Core/util/ForwardDeclarations.h, we declared (not defined!) all our subclasses, like Matrix, and we also declared the following class template:
\code
-template<typename T> struct ei_traits;
+namespace internal { template<typename T> struct traits; }
\endcode
-In src/Core/Matrix.h, right \em before the definition of class Matrix, we define a partial specialization of ei_traits for T=Matrix\<any template parameters\>. In this specialization of ei_traits, we define the Scalar typedef. So when we actually define Matrix, it is legal to refer to "typename ei_traits\<Matrix\>::Scalar".
+In src/Core/Matrix.h, right \em before the definition of class Matrix, we define a partial specialization of internal::traits for T=Matrix\<any template parameters\>. In this specialization of internal::traits, we define the Scalar typedef. So when we actually define Matrix, it is legal to refer to "typename internal::traits\<Matrix\>::Scalar".
Anyway, we have declared our operator+. In our case, where \a Derived and \a OtherDerived are VectorXf, the above declaration amounts to:
\code
@@ -204,7 +204,7 @@ class MatrixBase<VectorXf>
{
// ...
- const CwiseBinaryOp<ei_scalar_sum_op<float>, VectorXf, VectorXf>
+ const CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
operator+(const MatrixBase<VectorXf> &other) const;
// ...
@@ -228,7 +228,7 @@ What operator= is being called here? The vector u is an object of class VectorXf
template<typename OtherDerived>
inline Matrix& operator=(const MatrixBase<OtherDerived>& other)
{
- ei_assert(m_storage.data()!=0 && "you cannot use operator= with a non initialized matrix (instead use set()");
+ eigen_assert(m_storage.data()!=0 && "you cannot use operator= with a non initialized matrix (instead use set()");
return Base::operator=(other.derived());
}
\endcode
@@ -239,11 +239,11 @@ Here, Base is a typedef for MatrixBase\<Matrix\>. So, what is being called is th
\endcode
Here, \a Derived is VectorXf (since u is a VectorXf) and \a OtherDerived is CwiseBinaryOp. More specifically, as explained in the previous section, \a OtherDerived is:
\code
-CwiseBinaryOp<ei_scalar_sum_op<float>, VectorXf, VectorXf>
+CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
\endcode
So the full prototype of the operator= being called is:
\code
-VectorXf& MatrixBase<VectorXf>::operator=(const MatrixBase<CwiseBinaryOp<ei_scalar_sum_op<float>, VectorXf, VectorXf> > & other);
+VectorXf& MatrixBase<VectorXf>::operator=(const MatrixBase<CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf> > & other);
\endcode
This operator= literally reads "copying a sum of two VectorXf's into another VectorXf".
@@ -256,11 +256,11 @@ template<typename OtherDerived>
inline Derived& MatrixBase<Derived>
::operator=(const MatrixBase<OtherDerived>& other)
{
- return ei_assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
+ return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
}
\endcode
-OK so our next task is to understand ei_assign_selector :)
+OK so our next task is to understand internal::assign_selector :)
Here is its declaration (all that is still in the same file src/Core/Assign.h)
\code
@@ -271,24 +271,24 @@ template<typename Derived, typename OtherDerived,
&& int(Derived::RowsAtCompileTime) == int(OtherDerived::ColsAtCompileTime)
&& int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime)
&& int(Derived::SizeAtCompileTime) != 1>
-struct ei_assign_selector;
+struct internal::assign_selector;
\endcode
-So ei_assign_selector takes 4 template parameters, but the 2 last ones are automatically determined by the 2 first ones.
+So internal::assign_selector takes 4 template parameters, but the 2 last ones are automatically determined by the 2 first ones.
-EvalBeforeAssigning is here to enforce the EvalBeforeAssigningBit. As explained <a href="TopicLazyEvaluation.html">here</a>, certain expressions have this flag which makes them automatically evaluate into temporaries before assigning them to another expression. This is the case of the Product expression, in order to avoid strange aliasing effects when doing "m = m * m;" However, of course here our CwiseBinaryOp expression doesn't have the EvalBeforeAssigningBit: we said since the beginning that we didn't want a temporary to be introduced here. So if you go to src/Core/CwiseBinaryOp.h, you'll see that the Flags in ei_traits\<CwiseBinaryOp\> don't include the EvalBeforeAssigningBit. The Flags member of CwiseBinaryOp is then imported from the ei_traits by the EIGEN_GENERIC_PUBLIC_INTERFACE macro. Anyway, here the template parameter EvalBeforeAssigning has the value \c false.
+EvalBeforeAssigning is here to enforce the EvalBeforeAssigningBit. As explained <a href="TopicLazyEvaluation.html">here</a>, certain expressions have this flag which makes them automatically evaluate into temporaries before assigning them to another expression. This is the case of the Product expression, in order to avoid strange aliasing effects when doing "m = m * m;" However, of course here our CwiseBinaryOp expression doesn't have the EvalBeforeAssigningBit: we said since the beginning that we didn't want a temporary to be introduced here. So if you go to src/Core/CwiseBinaryOp.h, you'll see that the Flags in internal::traits\<CwiseBinaryOp\> don't include the EvalBeforeAssigningBit. The Flags member of CwiseBinaryOp is then imported from the internal::traits by the EIGEN_GENERIC_PUBLIC_INTERFACE macro. Anyway, here the template parameter EvalBeforeAssigning has the value \c false.
NeedToTranspose is here for the case where the user wants to copy a row-vector into a column-vector. We allow this as a special exception to the general rule that in assignments we require the dimensions to match. Anyway, here both the left-hand and right-hand sides are column vectors, in the sense that ColsAtCompileTime is equal to 1. So NeedToTranspose is \c false too.
So, here we are in the partial specialization:
\code
-ei_assign_selector<Derived, OtherDerived, false, false>
+internal::assign_selector<Derived, OtherDerived, false, false>
\endcode
Here's how it is defined:
\code
template<typename Derived, typename OtherDerived>
-struct ei_assign_selector<Derived,OtherDerived,false,false> {
+struct internal::assign_selector<Derived,OtherDerived,false,false> {
static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
};
\endcode
@@ -302,48 +302,48 @@ inline Derived& MatrixBase<Derived>
::lazyAssign(const MatrixBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
- ei_assert(rows() == other.rows() && cols() == other.cols());
- ei_assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
+ eigen_assert(rows() == other.rows() && cols() == other.cols());
+ internal::assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
return derived();
}
\endcode
What do we see here? Some assertions, and then the only interesting line is:
\code
- ei_assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
+ internal::assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
\endcode
-OK so now we want to know what is inside ei_assign_impl.
+OK so now we want to know what is inside internal::assign_impl.
Here is its declaration:
\code
template<typename Derived1, typename Derived2,
- int Vectorization = ei_assign_traits<Derived1, Derived2>::Vectorization,
- int Unrolling = ei_assign_traits<Derived1, Derived2>::Unrolling>
-struct ei_assign_impl;
+ int Vectorization = internal::assign_traits<Derived1, Derived2>::Vectorization,
+ int Unrolling = internal::assign_traits<Derived1, Derived2>::Unrolling>
+struct internal::assign_impl;
\endcode
-Again, ei_assign_selector takes 4 template parameters, but the 2 last ones are automatically determined by the 2 first ones.
+Again, internal::assign_impl takes 4 template parameters, but the 2 last ones are automatically determined by the 2 first ones.
-These two parameters \a Vectorization and \a Unrolling are determined by a helper class ei_assign_traits. Its job is to determine which vectorization strategy to use (that is \a Vectorization) and which unrolling strategy to use (that is \a Unrolling).
+These two parameters \a Vectorization and \a Unrolling are determined by a helper class internal::assign_traits. Its job is to determine which vectorization strategy to use (that is \a Vectorization) and which unrolling strategy to use (that is \a Unrolling).
-We'll not enter into the details of how these strategies are chosen (this is in the implementation of ei_assign_traits at the top of the same file). Let's just say that here \a Vectorization has the value \a LinearVectorization, and \a Unrolling has the value \a NoUnrolling (the latter is obvious since our vectors have dynamic size so there's no way to unroll the loop at compile-time).
+We'll not enter into the details of how these strategies are chosen (this is in the implementation of internal::assign_traits at the top of the same file). Let's just say that here \a Vectorization has the value \a LinearVectorization, and \a Unrolling has the value \a NoUnrolling (the latter is obvious since our vectors have dynamic size so there's no way to unroll the loop at compile-time).
-So the partial specialization of ei_assign_impl that we're looking at is:
+So the partial specialization of internal::assign_impl that we're looking at is:
\code
-ei_assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
+internal::assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
\endcode
Here is how it's defined:
\code
template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
+struct internal::assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
{
static void run(Derived1 &dst, const Derived2 &src)
{
const int size = dst.size();
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
- : ei_first_aligned(&dst.coeffRef(0), size);
+ const int packetSize = internal::packet_traits<typename Derived1::Scalar>::size;
+ const int alignedStart = internal::assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+ : internal::first_aligned(&dst.coeffRef(0), size);
const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
for(int index = 0; index < alignedStart; index++)
@@ -351,7 +351,7 @@ struct ei_assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
for(int index = alignedStart; index < alignedEnd; index += packetSize)
{
- dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
+ dst.template copyPacket<Derived2, Aligned, internal::assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
}
for(int index = alignedEnd; index < size; index++)
@@ -374,7 +374,7 @@ First, the vectorized part: the 48 first coefficients out of 50 will be copied b
\code
for(int index = alignedStart; index < alignedEnd; index += packetSize)
{
- dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
+ dst.template copyPacket<Derived2, Aligned, internal::assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
}
\endcode
@@ -384,7 +384,7 @@ template<typename Derived>
template<typename OtherDerived, int StoreMode, int LoadMode>
inline void MatrixBase<Derived>::copyPacket(int index, const MatrixBase<OtherDerived>& other)
{
- ei_internal_assert(index >= 0 && index < size());
+ eigen_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index,
other.derived().template packet<LoadMode>(index));
}
@@ -397,30 +397,30 @@ First, writePacket() here is a method on the left-hand side VectorXf. So we go t
template<int StoreMode>
inline void writePacket(int index, const PacketScalar& x)
{
- ei_pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
+ internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
}
\endcode
-Here, \a StoreMode is \a Aligned, indicating that we are doing a 128-bit-aligned write access, \a PacketScalar is a type representing a "SSE packet of 4 floats" and ei_pstoret is a function writing such a packet in memory. Their definitions are architecture-specific, we find them in src/Core/arch/SSE/PacketMath.h:
+Here, \a StoreMode is \a Aligned, indicating that we are doing a 128-bit-aligned write access, \a PacketScalar is a type representing a "SSE packet of 4 floats" and internal::pstoret is a function writing such a packet in memory. Their definitions are architecture-specific, we find them in src/Core/arch/SSE/PacketMath.h:
The line in src/Core/arch/SSE/PacketMath.h that determines the PacketScalar type (via a typedef in Matrix.h) is:
\code
-template<> struct ei_packet_traits<float> { typedef __m128 type; enum {size=4}; };
+template<> struct internal::packet_traits<float> { typedef __m128 type; enum {size=4}; };
\endcode
Here, __m128 is a SSE-specific type. Notice that the enum \a size here is what was used to define \a packetSize above.
-And here is the implementation of ei_pstoret:
+And here is the implementation of internal::pstoret:
\code
-template<> inline void ei_pstore(float* to, const __m128& from) { _mm_store_ps(to, from); }
+template<> inline void internal::pstore(float* to, const __m128& from) { _mm_store_ps(to, from); }
\endcode
-Here, __mm_store_ps is a SSE-specific intrinsic function, representing a single SSE instruction. The difference between ei_pstore and ei_pstoret is that ei_pstoret is a dispatcher handling both the aligned and unaligned cases, you find its definition in src/Core/GenericPacketMath.h:
+Here, _mm_store_ps is a SSE-specific intrinsic function, representing a single SSE instruction. The difference between internal::pstore and internal::pstoret is that internal::pstoret is a dispatcher handling both the aligned and unaligned cases, you find its definition in src/Core/GenericPacketMath.h:
\code
template<typename Scalar, typename Packet, int LoadMode>
-inline void ei_pstoret(Scalar* to, const Packet& from)
+inline void internal::pstoret(Scalar* to, const Packet& from)
{
if(LoadMode == Aligned)
- ei_pstore(to, from);
+ internal::pstore(to, from);
else
- ei_pstoreu(to, from);
+ internal::pstoreu(to, from);
}
\endcode
@@ -450,30 +450,30 @@ class Matrix
template<int LoadMode>
inline PacketScalar packet(int index) const
{
- return ei_ploadt<Scalar, LoadMode>(m_storage.data() + index);
+ return internal::ploadt<Scalar, LoadMode>(m_storage.data() + index);
}
};
\endcode
-We let you look up the definition of ei_ploadt in GenericPacketMath.h and the ei_pload in src/Core/arch/SSE/PacketMath.h. It is very similar to the above for ei_pstore.
+We let you look up the definition of internal::ploadt in GenericPacketMath.h and the internal::pload in src/Core/arch/SSE/PacketMath.h. It is very similar to the above for internal::pstore.
Let's go back to CwiseBinaryOp::packet(). Once the packets from the vectors \a v and \a w have been returned, what does this function do? It calls m_functor.packetOp() on them. What is m_functor? Here we must remember what particular template specialization of CwiseBinaryOp we're dealing with:
\code
-CwiseBinaryOp<ei_scalar_sum_op<float>, VectorXf, VectorXf>
+CwiseBinaryOp<internal::scalar_sum_op<float>, VectorXf, VectorXf>
\endcode
-So m_functor is an object of the empty class ei_scalar_sum_op<float>. As we mentioned above, don't worry about why we constructed an object of this empty class at all -- it's an implementation detail, the point is that some other functors need to store member data.
+So m_functor is an object of the empty class internal::scalar_sum_op<float>. As we mentioned above, don't worry about why we constructed an object of this empty class at all -- it's an implementation detail, the point is that some other functors need to store member data.
-Anyway, ei_scalar_sum_op is defined in src/Core/Functors.h:
+Anyway, internal::scalar_sum_op is defined in src/Core/Functors.h:
\code
-template<typename Scalar> struct ei_scalar_sum_op EIGEN_EMPTY_STRUCT {
+template<typename Scalar> struct internal::scalar_sum_op EIGEN_EMPTY_STRUCT {
inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
template<typename PacketScalar>
inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_padd(a,b); }
+ { return internal::padd(a,b); }
};
\endcode
-As you can see, all what packetOp() does is to call ei_padd on the two packets. Here is the definition of ei_padd from src/Core/arch/SSE/PacketMath.h:
+As you can see, all what packetOp() does is to call internal::padd on the two packets. Here is the definition of internal::padd from src/Core/arch/SSE/PacketMath.h:
\code
-template<> inline __m128 ei_padd(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); }
+template<> inline __m128 internal::padd(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); }
\endcode
Here, _mm_add_ps is a SSE-specific intrinsic function, representing a single SSE instruction.
@@ -481,7 +481,7 @@ To summarize, the loop
\code
for(int index = alignedStart; index < alignedEnd; index += packetSize)
{
- dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
+ dst.template copyPacket<Derived2, Aligned, internal::assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
}
\endcode
has been compiled to the following code: for \a index going from 0 to 11 ( = 48/4 - 1), read the i-th packet (of 4 floats) from the vector v and the i-th packet from the vector w using two _mm_load_ps SSE instructions, then add them together using a _mm_add_ps instruction, then store the result using a _mm_store_ps instruction.
diff --git a/doc/I11_Aliasing.dox b/doc/I11_Aliasing.dox
index 302575368..04a24bded 100644
--- a/doc/I11_Aliasing.dox
+++ b/doc/I11_Aliasing.dox
@@ -68,7 +68,7 @@ and exits with a message like
\verbatim
void Eigen::DenseBase<Derived>::checkTransposeAliasing(const OtherDerived&) const
[with OtherDerived = Eigen::Transpose<Eigen::Matrix<int, 2, 2, 0, 2, 2> >, Derived = Eigen::Matrix<int, 2, 2, 0, 2, 2>]:
-Assertion `(!ei_check_transpose_aliasing_selector<Scalar,ei_blas_traits<Derived>::IsTransposed,OtherDerived>::run(ei_extract_data(derived()), other))
+Assertion `(!internal::check_transpose_aliasing_selector<Scalar,internal::blas_traits<Derived>::IsTransposed,OtherDerived>::run(internal::extract_data(derived()), other))
&& "aliasing detected during tranposition, use transposeInPlace() or evaluate the rhs into a temporary using .eval()"' failed.
\endverbatim
diff --git a/doc/I13_FunctionsTakingEigenTypes.dox b/doc/I13_FunctionsTakingEigenTypes.dox
index fea6b940f..8f2542632 100644
--- a/doc/I13_FunctionsTakingEigenTypes.dox
+++ b/doc/I13_FunctionsTakingEigenTypes.dox
@@ -111,8 +111,8 @@ The solution which is preferred at the moment is based on a little \em hack. One
template <typename Derived, typename OtherDerived>
void cov(const MatrixBase<Derived>& x, const MatrixBase<Derived>& y, MatrixBase<OtherDerived> EIGEN_REF_TO_TEMPORARY C)
{
- typedef typename ei_traits<Derived>::Scalar Scalar;
- typedef typename ei_plain_row_type<Derived>::type RowVectorType;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::plain_row_type<Derived>::type RowVectorType;
const Scalar num_observations = static_cast<Scalar>(x.rows());
@@ -141,8 +141,8 @@ This is not the case anymore, when we are using an implementation taking MatrixB
template <typename Derived, typename OtherDerived>
void cov(const MatrixBase<Derived>& x, const MatrixBase<Derived>& y, MatrixBase<OtherDerived> EIGEN_REF_TO_TEMPORARY C_)
{
- typedef typename ei_traits<Derived>::Scalar Scalar;
- typedef typename ei_plain_row_type<Derived>::type RowVectorType;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::plain_row_type<Derived>::type RowVectorType;
const Scalar num_observations = static_cast<Scalar>(x.rows());
diff --git a/doc/snippets/Tridiagonalization_decomposeInPlace.cpp b/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
index 1d0961aee..93dcfca1d 100644
--- a/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
+++ b/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
@@ -4,7 +4,7 @@ cout << "Here is a random symmetric 5x5 matrix:" << endl << A << endl << endl;
VectorXd diag(5);
VectorXd subdiag(4);
-ei_tridiagonalization_inplace(A, diag, subdiag, true);
+internal::tridiagonalization_inplace(A, diag, subdiag, true);
cout << "The orthogonal matrix Q is:" << endl << A << endl;
cout << "The diagonal of the tridiagonal matrix T is:" << endl << diag << endl;
cout << "The subdiagonal of the tridiagonal matrix T is:" << endl << subdiag << endl;