author    Gael Guennebaud <g.gael@free.fr>  2014-08-29 15:20:31 +0200
committer Gael Guennebaud <g.gael@free.fr>  2014-08-29 15:20:31 +0200
commit    124d12a915129bc36ebe87f483712505a11dc91f (patch)
tree      5c0b12148e55cfbfa2c2e69368d982774d96193f /test
parent    f29dbec321617d46287c4415889c4485ad70bea3 (diff)
parent    aec3d90ca65528fdface6013ccbcc33b04ada867 (diff)
merge default branch
Diffstat (limited to 'test')
-rw-r--r--  test/CMakeLists.txt               |  8
-rw-r--r--  test/basicstuff.cpp               | 34
-rw-r--r--  test/ctorleak.cpp                 | 51
-rw-r--r--  test/dynalloc.cpp                 | 33
-rw-r--r--  test/eigensolver_selfadjoint.cpp  | 43
-rw-r--r--  test/main.h                       | 20
-rw-r--r--  test/simplicial_cholesky.cpp      | 41
-rw-r--r--  test/sparseqr.cpp                 |  2
8 files changed, 189 insertions, 43 deletions
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 7555aaa38..6446b8bb0 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -158,7 +158,9 @@ ei_add_test(basicstuff)
ei_add_test(linearstructure)
ei_add_test(integer_types)
ei_add_test(unalignedcount)
-ei_add_test(exceptions)
+if(NOT EIGEN_TEST_NO_EXCEPTIONS)
+ ei_add_test(exceptions)
+endif()
ei_add_test(redux)
ei_add_test(visitor)
ei_add_test(block)
@@ -242,7 +244,9 @@ ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
ei_add_test(zerosized)
ei_add_test(dontalign)
ei_add_test(evaluators)
-ei_add_test(sizeoverflow)
+if(NOT EIGEN_TEST_NO_EXCEPTIONS)
+ ei_add_test(sizeoverflow)
+endif()
ei_add_test(prec_inverse_4x4)
ei_add_test(vectorwiseop)
ei_add_test(special_numbers)
diff --git a/test/basicstuff.cpp b/test/basicstuff.cpp
index 56370d591..3d7d74d4e 100644
--- a/test/basicstuff.cpp
+++ b/test/basicstuff.cpp
@@ -183,6 +183,7 @@ void fixedSizeMatrixConstruction()
Scalar raw[4];
for(int k=0; k<4; ++k)
raw[k] = internal::random<Scalar>();
+
{
Matrix<Scalar,4,1> m(raw);
Array<Scalar,4,1> a(raw);
@@ -200,18 +201,40 @@ void fixedSizeMatrixConstruction()
VERIFY((a==Array<Scalar,3,1>(raw[0],raw[1],raw[2])).all());
}
{
- Matrix<Scalar,2,1> m(raw);
- Array<Scalar,2,1> a(raw);
+ Matrix<Scalar,2,1> m(raw), m2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) );
+ Array<Scalar,2,1> a(raw), a2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) );
for(int k=0; k<2; ++k) VERIFY(m(k) == raw[k]);
for(int k=0; k<2; ++k) VERIFY(a(k) == raw[k]);
VERIFY_IS_EQUAL(m,(Matrix<Scalar,2,1>(raw[0],raw[1])));
VERIFY((a==Array<Scalar,2,1>(raw[0],raw[1])).all());
+ for(int k=0; k<2; ++k) VERIFY(m2(k) == DenseIndex(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(a2(k) == DenseIndex(raw[k]));
+ }
+ {
+ Matrix<Scalar,1,2> m(raw),
+ m2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) ),
+ m3( (int(raw[0])), (int(raw[1])) ),
+ m4( (float(raw[0])), (float(raw[1])) );
+ Array<Scalar,1,2> a(raw), a2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) );
+ for(int k=0; k<2; ++k) VERIFY(m(k) == raw[k]);
+ for(int k=0; k<2; ++k) VERIFY(a(k) == raw[k]);
+ VERIFY_IS_EQUAL(m,(Matrix<Scalar,1,2>(raw[0],raw[1])));
+ VERIFY((a==Array<Scalar,1,2>(raw[0],raw[1])).all());
+ for(int k=0; k<2; ++k) VERIFY(m2(k) == DenseIndex(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(a2(k) == DenseIndex(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(m3(k) == int(raw[k]));
+ for(int k=0; k<2; ++k) VERIFY(m4(k) == float(raw[k]));
}
{
- Matrix<Scalar,1,1> m(raw);
- Array<Scalar,1,1> a(raw);
+ Matrix<Scalar,1,1> m(raw), m1(raw[0]), m2( (DenseIndex(raw[0])) ), m3( (int(raw[0])) );
+ Array<Scalar,1,1> a(raw), a1(raw[0]), a2( (DenseIndex(raw[0])) );
VERIFY(m(0) == raw[0]);
VERIFY(a(0) == raw[0]);
+ VERIFY(m1(0) == raw[0]);
+ VERIFY(a1(0) == raw[0]);
+ VERIFY(m2(0) == DenseIndex(raw[0]));
+ VERIFY(a2(0) == DenseIndex(raw[0]));
+ VERIFY(m3(0) == int(raw[0]));
VERIFY_IS_EQUAL(m,(Matrix<Scalar,1,1>(raw[0])));
VERIFY((a==Array<Scalar,1,1>(raw[0])).all());
}
@@ -233,9 +256,10 @@ void test_basicstuff()
}
CALL_SUBTEST_1(fixedSizeMatrixConstruction<unsigned char>());
- CALL_SUBTEST_1(fixedSizeMatrixConstruction<double>());
+ CALL_SUBTEST_1(fixedSizeMatrixConstruction<float>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<double>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<int>());
+ CALL_SUBTEST_1(fixedSizeMatrixConstruction<long int>());
CALL_SUBTEST_1(fixedSizeMatrixConstruction<std::ptrdiff_t>());
CALL_SUBTEST_2(casting());
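
For context, here is a minimal standalone sketch of the construction paths the extended basicstuff test now covers (assuming a stock Eigen installation; the variable names are illustrative only). As the updated test asserts, a two-argument constructor on a fixed-size 2x1 or 1x2 type is a coefficient constructor, not a (rows, cols) sizing request, even when the arguments are integers:

    #include <Eigen/Dense>
    #include <cassert>
    using Eigen::Matrix;

    int main()
    {
      float raw[2] = { 3.f, 7.f };
      Matrix<float,2,1> from_ptr(raw);        // copies raw[0] and raw[1]
      Matrix<float,2,1> from_vals(3.f, 7.f);  // coefficient-wise constructor
      Matrix<float,1,2> from_ints(3, 7);      // integer arguments are still coefficients here
      assert(from_ptr(0) == from_vals(0) && from_ptr(1) == from_vals(1));
      assert(from_ints(0) == 3.f && from_ints(1) == 7.f);
      return 0;
    }
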
diff --git a/test/ctorleak.cpp b/test/ctorleak.cpp
new file mode 100644
index 000000000..145d91be4
--- /dev/null
+++ b/test/ctorleak.cpp
@@ -0,0 +1,51 @@
+#include "main.h"
+
+#include <exception> // std::exception
+
+struct Foo
+{
+ static unsigned object_count;
+ static unsigned object_limit;
+ int dummy;
+
+ Foo()
+ {
+#ifdef EIGEN_EXCEPTIONS
+ // TODO: Is this the correct way to handle this?
+ if (Foo::object_count > Foo::object_limit) { throw Foo::Fail(); }
+#endif
+ ++Foo::object_count;
+ }
+
+ ~Foo()
+ {
+ --Foo::object_count;
+ }
+
+ class Fail : public std::exception {};
+};
+
+unsigned Foo::object_count = 0;
+unsigned Foo::object_limit = 0;
+
+
+void test_ctorleak()
+{
+ typedef DenseIndex Index;
+ Foo::object_count = 0;
+ for(int i = 0; i < g_repeat; i++) {
+ Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE);
+ Foo::object_limit = internal::random(0, rows*cols - 2);
+#ifdef EIGEN_EXCEPTIONS
+ try
+ {
+#endif
+ Matrix<Foo, Dynamic, Dynamic> m(rows, cols);
+#ifdef EIGEN_EXCEPTIONS
+ VERIFY(false); // not reached if exceptions are enabled
+ }
+ catch (const Foo::Fail&) { /* ignore */ }
+#endif
+ }
+ VERIFY_IS_EQUAL(static_cast<unsigned>(0), Foo::object_count);
+}
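
The new ctorleak test relies on the usual exception-safety guarantee for array construction: if constructing element k throws, elements 0..k-1 must be destroyed before the exception propagates, so Foo::object_count drops back to zero. A minimal sketch of that pattern, independent of Eigen (construct_n is a hypothetical helper, not an Eigen API; raw must point to storage for n objects of type T):

    #include <cstddef>
    #include <new>

    template <typename T>
    T* construct_n(void* raw, std::size_t n)
    {
      T* p = static_cast<T*>(raw);
      std::size_t k = 0;
      try {
        for (; k < n; ++k)
          new (p + k) T();                // may throw, like Foo::Foo() above
      } catch (...) {
        while (k > 0) (p + --k)->~T();    // roll back already-constructed elements
        throw;                            // let the caller release the buffer
      }
      return p;
    }
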
diff --git a/test/dynalloc.cpp b/test/dynalloc.cpp
index c98cc80f0..1190eb9cd 100644
--- a/test/dynalloc.cpp
+++ b/test/dynalloc.cpp
@@ -55,7 +55,7 @@ void check_aligned_new()
void check_aligned_stack_alloc()
{
- for(int i = 1; i < 1000; i++)
+ for(int i = 1; i < 400; i++)
{
ei_declare_aligned_stack_constructed_variable(float,p,i,0);
VERIFY(size_t(p)%ALIGNMENT==0);
@@ -93,6 +93,32 @@ template<typename T> void check_dynaligned()
}
}
+template<typename T> void check_custom_new_delete()
+{
+ {
+ T* t = new T;
+ delete t;
+ }
+
+ {
+ std::size_t N = internal::random<std::size_t>(1,10);
+ T* t = new T[N];
+ delete[] t;
+ }
+
+#ifdef EIGEN_ALIGN
+ {
+ T* t = static_cast<T *>((T::operator new)(sizeof(T)));
+ (T::operator delete)(t, sizeof(T));
+ }
+
+ {
+ T* t = static_cast<T *>((T::operator new)(sizeof(T)));
+ (T::operator delete)(t);
+ }
+#endif
+}
+
void test_dynalloc()
{
// low level dynamic memory allocation
@@ -109,6 +135,11 @@ void test_dynalloc()
CALL_SUBTEST(check_dynaligned<Vector4d>() );
CALL_SUBTEST(check_dynaligned<Vector4i>() );
CALL_SUBTEST(check_dynaligned<Vector8f>() );
+
+ CALL_SUBTEST( check_custom_new_delete<Vector4f>() );
+ CALL_SUBTEST( check_custom_new_delete<Vector2f>() );
+ CALL_SUBTEST( check_custom_new_delete<Matrix4f>() );
+ CALL_SUBTEST( check_custom_new_delete<MatrixXi>() );
}
// check static allocation, who knows ?
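
check_custom_new_delete calls T::operator new/delete directly, i.e. the class-level overloads that Eigen types provide to keep heap-allocated objects suitably aligned. For reference, a minimal sketch of the user-side counterpart (assuming the standard EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro; the Body type is illustrative only):

    #include <Eigen/Dense>

    struct Body
    {
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW     // injects aligned operator new/new[]/delete/delete[]
      Eigen::Vector4f position;           // fixed-size vectorizable member
      float mass;
    };

    int main()
    {
      Body* b   = new Body;               // goes through the aligned operator new
      Body* arr = new Body[3];            // aligned operator new[]
      delete[] arr;
      delete b;
      return 0;
    }
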
diff --git a/test/eigensolver_selfadjoint.cpp b/test/eigensolver_selfadjoint.cpp
index 06a6a8654..3851f9df2 100644
--- a/test/eigensolver_selfadjoint.cpp
+++ b/test/eigensolver_selfadjoint.cpp
@@ -29,7 +29,21 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
+ MatrixType symmC = symmA;
+
+ // randomly nullify some rows/columns
+ {
+ Index count = 1;//internal::random<Index>(-cols,cols);
+ for(Index k=0; k<count; ++k)
+ {
+ Index i = internal::random<Index>(0,cols-1);
+ symmA.row(i).setZero();
+ symmA.col(i).setZero();
+ }
+ }
+
symmA.template triangularView<StrictlyUpper>().setZero();
+ symmC.template triangularView<StrictlyUpper>().setZero();
MatrixType b = MatrixType::Random(rows,cols);
MatrixType b1 = MatrixType::Random(rows,cols);
@@ -40,7 +54,7 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
SelfAdjointEigenSolver<MatrixType> eiDirect;
eiDirect.computeDirect(symmA);
// generalized eigen pb
- GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmA, symmB);
+ GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);
VERIFY_IS_EQUAL(eiSymm.info(), Success);
VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
@@ -57,27 +71,28 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
// generalized eigen problem Ax = lBx
- eiSymmGen.compute(symmA, symmB,Ax_lBx);
+ eiSymmGen.compute(symmC, symmB,Ax_lBx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
+ VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem BAx = lx
- eiSymmGen.compute(symmA, symmB,BAx_lx);
+ eiSymmGen.compute(symmC, symmB,BAx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmB.template selfadjointView<Lower>() * (symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
+ VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem ABx = lx
- eiSymmGen.compute(symmA, symmB,ABx_lx);
+ eiSymmGen.compute(symmC, symmB,ABx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmA.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
+ VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
+ eiSymm.compute(symmC);
MatrixType sqrtSymmA = eiSymm.operatorSqrt();
- VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
- VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
+ VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
+ VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));
@@ -95,9 +110,9 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
// test Tridiagonalization's methods
- Tridiagonalization<MatrixType> tridiag(symmA);
+ Tridiagonalization<MatrixType> tridiag(symmC);
// FIXME tridiag.matrixQ().adjoint() does not work
- VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
+ VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
// Test computation of eigenvalues from tridiagonal matrix
if(rows > 1)
@@ -111,8 +126,8 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
if (rows > 1)
{
// Test matrix with NaN
- symmA(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
- SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmA);
+ symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
+ SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
}
}
@@ -122,8 +137,10 @@ void test_eigensolver_selfadjoint()
int s = 0;
for(int i = 0; i < g_repeat; i++) {
// very important to test 3x3 and 2x2 matrices since we provide special paths for them
+ CALL_SUBTEST_1( selfadjointeigensolver(Matrix2f()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) );
+ CALL_SUBTEST_1( selfadjointeigensolver(Matrix3d()) );
CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) );
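
The updated test zeroes a random row/column of symmA before running the eigensolvers, stressing them on symmetric but structurally rank-deficient input. A minimal standalone sketch of that scenario (assuming a stock Eigen installation):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd a    = Eigen::MatrixXd::Random(5, 5);
      Eigen::MatrixXd symm = a.adjoint() * a;   // symmetric positive semi-definite
      symm.row(2).setZero();
      symm.col(2).setZero();                    // rank-deficient, still symmetric

      Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(symm);
      if (es.info() != Eigen::Success) return 1;
      std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";
      return 0;
    }
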
diff --git a/test/main.h b/test/main.h
index e89b5a305..b8854a1c3 100644
--- a/test/main.h
+++ b/test/main.h
@@ -79,6 +79,10 @@ namespace Eigen
#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "")
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
+ #define EIGEN_EXCEPTIONS
+#endif
+
#ifndef EIGEN_NO_ASSERTION_CHECKING
namespace Eigen
@@ -120,13 +124,14 @@ namespace Eigen
if(report_on_cerr_on_assert_failure) \
std::cerr << #a << " " __FILE__ << "(" << __LINE__ << ")\n"; \
Eigen::no_more_assert = true; \
- throw Eigen::eigen_assert_exception(); \
+ EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
} \
else if (Eigen::internal::push_assert) \
{ \
eigen_assert_list.push_back(std::string(EI_PP_MAKE_STRING(__FILE__) " (" EI_PP_MAKE_STRING(__LINE__) ") : " #a) ); \
}
+ #ifdef EIGEN_EXCEPTIONS
#define VERIFY_RAISES_ASSERT(a) \
{ \
Eigen::no_more_assert = false; \
@@ -145,6 +150,7 @@ namespace Eigen
Eigen::report_on_cerr_on_assert_failure = true; \
Eigen::internal::push_assert = false; \
}
+ #endif //EIGEN_EXCEPTIONS
#elif !defined(__CUDACC__) // EIGEN_DEBUG_ASSERTS
// see bug 89. The copy_bool here is working around a bug in gcc <= 4.3
@@ -155,9 +161,10 @@ namespace Eigen
if(report_on_cerr_on_assert_failure) \
eigen_plain_assert(a); \
else \
- throw Eigen::eigen_assert_exception(); \
+ EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
}
- #define VERIFY_RAISES_ASSERT(a) { \
+ #ifdef EIGEN_EXCEPTIONS
+ #define VERIFY_RAISES_ASSERT(a) { \
Eigen::no_more_assert = false; \
Eigen::report_on_cerr_on_assert_failure = false; \
try { \
@@ -167,9 +174,14 @@ namespace Eigen
catch (Eigen::eigen_assert_exception&) { VERIFY(true); } \
Eigen::report_on_cerr_on_assert_failure = true; \
}
-
+ #endif //EIGEN_EXCEPTIONS
#endif // EIGEN_DEBUG_ASSERTS
+#ifndef VERIFY_RAISES_ASSERT
+ #define VERIFY_RAISES_ASSERT(a) \
+ std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled\n";
+#endif
+
#if !defined(__CUDACC__)
#define EIGEN_USE_CUSTOM_ASSERT
#endif
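
The main.h change detects at compile time whether the compiler has exceptions enabled and routes assertion failures through EIGEN_THROW_X, so the test harness still builds with -fno-exceptions. A minimal sketch of the same detection-plus-fallback pattern (the MY_* names are illustrative, not Eigen macros; EIGEN_THROW_X is assumed to behave along these lines):

    #include <cstdlib>

    #if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
      #define MY_EXCEPTIONS
      #define MY_THROW_X(X) throw X
    #else
      #define MY_THROW_X(X) std::abort()
    #endif

    struct my_error {};

    void fail_hard()
    {
      MY_THROW_X(my_error());   // throws when exceptions are on, aborts otherwise
    }
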
diff --git a/test/simplicial_cholesky.cpp b/test/simplicial_cholesky.cpp
index e93a52e9c..786468421 100644
--- a/test/simplicial_cholesky.cpp
+++ b/test/simplicial_cholesky.cpp
@@ -11,26 +11,31 @@
template<typename T> void test_simplicial_cholesky_T()
{
- SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower;
- SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper;
- SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower;
- SimplicialLDLT<SparseMatrix<T>, Upper> llt_colmajor_upper;
- SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower;
- SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper;
+ SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower_amd;
+ SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper_amd;
+ SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower_amd;
+ SimplicialLLT<SparseMatrix<T>, Upper> llt_colmajor_upper_amd;
+ SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower_amd;
+ SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper_amd;
+ SimplicialLDLT<SparseMatrix<T>, Lower, NaturalOrdering<int> > ldlt_colmajor_lower_nat;
+ SimplicialLDLT<SparseMatrix<T>, Upper, NaturalOrdering<int> > ldlt_colmajor_upper_nat;
- check_sparse_spd_solving(chol_colmajor_lower);
- check_sparse_spd_solving(chol_colmajor_upper);
- check_sparse_spd_solving(llt_colmajor_lower);
- check_sparse_spd_solving(llt_colmajor_upper);
- check_sparse_spd_solving(ldlt_colmajor_lower);
- check_sparse_spd_solving(ldlt_colmajor_upper);
+ check_sparse_spd_solving(chol_colmajor_lower_amd);
+ check_sparse_spd_solving(chol_colmajor_upper_amd);
+ check_sparse_spd_solving(llt_colmajor_lower_amd);
+ check_sparse_spd_solving(llt_colmajor_upper_amd);
+ check_sparse_spd_solving(ldlt_colmajor_lower_amd);
+ check_sparse_spd_solving(ldlt_colmajor_upper_amd);
- check_sparse_spd_determinant(chol_colmajor_lower);
- check_sparse_spd_determinant(chol_colmajor_upper);
- check_sparse_spd_determinant(llt_colmajor_lower);
- check_sparse_spd_determinant(llt_colmajor_upper);
- check_sparse_spd_determinant(ldlt_colmajor_lower);
- check_sparse_spd_determinant(ldlt_colmajor_upper);
+ check_sparse_spd_determinant(chol_colmajor_lower_amd);
+ check_sparse_spd_determinant(chol_colmajor_upper_amd);
+ check_sparse_spd_determinant(llt_colmajor_lower_amd);
+ check_sparse_spd_determinant(llt_colmajor_upper_amd);
+ check_sparse_spd_determinant(ldlt_colmajor_lower_amd);
+ check_sparse_spd_determinant(ldlt_colmajor_upper_amd);
+
+ check_sparse_spd_solving(ldlt_colmajor_lower_nat);
+ check_sparse_spd_solving(ldlt_colmajor_upper_nat);
}
void test_simplicial_cholesky()
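
The new *_nat instances exercise the third template parameter of the simplicial solvers, which selects the fill-reducing ordering (NaturalOrdering<int> disables reordering, versus the default AMD). A minimal standalone solve with that variant might look like this (assuming a stock Eigen installation):

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      typedef Eigen::Triplet<double>      Trip;

      // Small SPD tridiagonal matrix.
      const int n = 5;
      std::vector<Trip> coeffs;
      for (int i = 0; i < n; ++i)
      {
        coeffs.push_back(Trip(i, i, 2.0));
        if (i > 0) { coeffs.push_back(Trip(i, i-1, -1.0)); coeffs.push_back(Trip(i-1, i, -1.0)); }
      }
      SpMat A(n, n);
      A.setFromTriplets(coeffs.begin(), coeffs.end());
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::SimplicialLDLT<SpMat, Eigen::Lower, Eigen::NaturalOrdering<int> > ldlt(A);
      if (ldlt.info() != Eigen::Success) return 1;
      Eigen::VectorXd x = ldlt.solve(b);
      return (A * x).isApprox(b) ? 0 : 1;
    }
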
diff --git a/test/sparseqr.cpp b/test/sparseqr.cpp
index 1fe4a98ee..8e6887dd3 100644
--- a/test/sparseqr.cpp
+++ b/test/sparseqr.cpp
@@ -54,6 +54,8 @@ template<typename Scalar> void test_sparseqr_scalar()
b = dA * DenseVector::Random(A.cols());
solver.compute(A);
+ if(internal::random<float>(0,1)>0.5)
+ solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern does not change.
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
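
The added factorize(A) call checks that a SparseQR object can be re-factorized without repeating analyzePattern() as long as the sparsity pattern of A is unchanged. A minimal sketch of that usage outside the test harness (assuming a stock Eigen installation):

    #include <Eigen/Sparse>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(4, 4);
      A.insert(0,0) = 2.0; A.insert(1,1) = 3.0;
      A.insert(2,2) = 4.0; A.insert(3,3) = 5.0;
      A.insert(0,3) = 1.0;
      A.makeCompressed();                // SparseQR expects compressed storage

      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > solver;
      solver.analyzePattern(A);          // symbolic step, depends only on the pattern
      solver.factorize(A);               // numerical step
      if (solver.info() != Eigen::Success) return 1;

      A.coeffRef(0,0) = 2.5;             // same pattern, new values
      solver.factorize(A);               // no new analyzePattern() needed
      if (solver.info() != Eigen::Success) return 1;

      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
      Eigen::VectorXd x = solver.solve(b);
      return (A * x).isApprox(b) ? 0 : 1;
    }
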