Back to home page

EIC code displayed by LXR

 
 

    


Warning, file /include/eigen3/Eigen/src/SVD/BDCSVD.h was not indexed or was modified since last indexation (in which case cross-reference links may be missing, inaccurate or erroneous).

0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 // 
0004 // We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
0005 // research report written by Ming Gu and Stanley C.Eisenstat
0006 // The code variable names correspond to the names they used in their 
0007 // report
0008 //
0009 // Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
0010 // Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
0011 // Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
0012 // Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
0013 // Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
0014 // Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
0015 //
0016 // Source Code Form is subject to the terms of the Mozilla
0017 // Public License v. 2.0. If a copy of the MPL was not distributed
0018 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0019 
0020 #ifndef EIGEN_BDCSVD_H
0021 #define EIGEN_BDCSVD_H
0022 // #define EIGEN_BDCSVD_DEBUG_VERBOSE
0023 // #define EIGEN_BDCSVD_SANITY_CHECKS
0024 
0025 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0026 #undef eigen_internal_assert
0027 #define eigen_internal_assert(X) assert(X);
0028 #endif
0029 
0030 namespace Eigen {
0031 
0032 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0033 IOFormat bdcsvdfmt(8, 0, ", ", "\n", "  [", "]");
0034 #endif
0035   
0036 template<typename _MatrixType> class BDCSVD;
0037 
0038 namespace internal {
0039 
0040 template<typename _MatrixType> 
0041 struct traits<BDCSVD<_MatrixType> >
0042         : traits<_MatrixType>
0043 {
0044   typedef _MatrixType MatrixType;
0045 };  
0046 
0047 } // end namespace internal
0048   
0049   
0050 /** \ingroup SVD_Module
0051  *
0052  *
0053  * \class BDCSVD
0054  *
0055  * \brief class Bidiagonal Divide and Conquer SVD
0056  *
0057  * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
0058  *
0059  * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
0060  * and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
0061  * You can control the switching size with the setSwitchSize() method, default is 16.
0062  * For small matrice (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly
0063  * recommended and can several order of magnitude faster.
0064  *
0065  * \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.
0066  * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
0067  * you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
0068  * significantly degrade the accuracy.
0069  *
0070  * \sa class JacobiSVD
0071  */
0072 template<typename _MatrixType> 
0073 class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
0074 {
0075   typedef SVDBase<BDCSVD> Base;
0076     
0077 public:
0078   using Base::rows;
0079   using Base::cols;
0080   using Base::computeU;
0081   using Base::computeV;
0082   
0083   typedef _MatrixType MatrixType;
0084   typedef typename MatrixType::Scalar Scalar;
0085   typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
0086   typedef typename NumTraits<RealScalar>::Literal Literal;
0087   enum {
0088     RowsAtCompileTime = MatrixType::RowsAtCompileTime, 
0089     ColsAtCompileTime = MatrixType::ColsAtCompileTime, 
0090     DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), 
0091     MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, 
0092     MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, 
0093     MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), 
0094     MatrixOptions = MatrixType::Options
0095   };
0096 
0097   typedef typename Base::MatrixUType MatrixUType;
0098   typedef typename Base::MatrixVType MatrixVType;
0099   typedef typename Base::SingularValuesType SingularValuesType;
0100   
0101   typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
0102   typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
0103   typedef Matrix<RealScalar, Dynamic, 1> VectorType;
0104   typedef Array<RealScalar, Dynamic, 1> ArrayXr;
0105   typedef Array<Index,1,Dynamic> ArrayXi;
0106   typedef Ref<ArrayXr> ArrayRef;
0107   typedef Ref<ArrayXi> IndicesRef;
0108 
0109   /** \brief Default Constructor.
0110    *
0111    * The default constructor is useful in cases in which the user intends to
0112    * perform decompositions via BDCSVD::compute(const MatrixType&).
0113    */
0114   BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0)
0115   {}
0116 
0117 
0118   /** \brief Default Constructor with memory preallocation
0119    *
0120    * Like the default constructor but with preallocation of the internal data
0121    * according to the specified problem size.
0122    * \sa BDCSVD()
0123    */
0124   BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
0125     : m_algoswap(16), m_numIters(0)
0126   {
0127     allocate(rows, cols, computationOptions);
0128   }
0129 
0130   /** \brief Constructor performing the decomposition of given matrix.
0131    *
0132    * \param matrix the matrix to decompose
0133    * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
0134    *                           By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, 
0135    *                           #ComputeFullV, #ComputeThinV.
0136    *
0137    * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
0138    * available with the (non - default) FullPivHouseholderQR preconditioner.
0139    */
0140   BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
0141     : m_algoswap(16), m_numIters(0)
0142   {
0143     compute(matrix, computationOptions);
0144   }
0145 
0146   ~BDCSVD() 
0147   {
0148   }
0149   
0150   /** \brief Method performing the decomposition of given matrix using custom options.
0151    *
0152    * \param matrix the matrix to decompose
0153    * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
0154    *                           By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, 
0155    *                           #ComputeFullV, #ComputeThinV.
0156    *
0157    * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
0158    * available with the (non - default) FullPivHouseholderQR preconditioner.
0159    */
0160   BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
0161 
0162   /** \brief Method performing the decomposition of given matrix using current options.
0163    *
0164    * \param matrix the matrix to decompose
0165    *
0166    * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
0167    */
0168   BDCSVD& compute(const MatrixType& matrix)
0169   {
0170     return compute(matrix, this->m_computationOptions);
0171   }
0172 
0173   void setSwitchSize(int s) 
0174   {
0175     eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
0176     m_algoswap = s;
0177   }
0178  
0179 private:
0180   void allocate(Index rows, Index cols, unsigned int computationOptions);
0181   void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
0182   void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
0183   void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
0184   void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
0185   void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
0186   void deflation43(Index firstCol, Index shift, Index i, Index size);
0187   void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
0188   void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
0189   template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
0190   void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
0191   void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
0192   static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
0193 
0194 protected:
0195   MatrixXr m_naiveU, m_naiveV;
0196   MatrixXr m_computed;
0197   Index m_nRec;
0198   ArrayXr m_workspace;
0199   ArrayXi m_workspaceI;
0200   int m_algoswap;
0201   bool m_isTranspose, m_compU, m_compV;
0202   
0203   using Base::m_singularValues;
0204   using Base::m_diagSize;
0205   using Base::m_computeFullU;
0206   using Base::m_computeFullV;
0207   using Base::m_computeThinU;
0208   using Base::m_computeThinV;
0209   using Base::m_matrixU;
0210   using Base::m_matrixV;
0211   using Base::m_info;
0212   using Base::m_isInitialized;
0213   using Base::m_nonzeroSingularValues;
0214 
0215 public:  
0216   int m_numIters;
0217 }; //end class BDCSVD
0218 
0219 
0220 // Method to allocate and initialize matrix and attributes
0221 template<typename MatrixType>
0222 void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
0223 {
0224   m_isTranspose = (cols > rows);
0225 
0226   if (Base::allocate(rows, cols, computationOptions))
0227     return;
0228   
0229   m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
0230   m_compU = computeV();
0231   m_compV = computeU();
0232   if (m_isTranspose)
0233     std::swap(m_compU, m_compV);
0234   
0235   if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
0236   else         m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );
0237   
0238   if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);
0239   
0240   m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
0241   m_workspaceI.resize(3*m_diagSize);
0242 }// end allocate
0243 
0244 template<typename MatrixType>
0245 BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions) 
0246 {
0247 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0248   std::cout << "\n\n\n======================================================================================================================\n\n\n";
0249 #endif
0250   allocate(matrix.rows(), matrix.cols(), computationOptions);
0251   using std::abs;
0252 
0253   const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
0254   
0255   //**** step -1 - If the problem is too small, directly falls back to JacobiSVD and return
0256   if(matrix.cols() < m_algoswap)
0257   {
0258     // FIXME this line involves temporaries
0259     JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
0260     m_isInitialized = true;
0261     m_info = jsvd.info();
0262     if (m_info == Success || m_info == NoConvergence) {
0263       if(computeU()) m_matrixU = jsvd.matrixU();
0264       if(computeV()) m_matrixV = jsvd.matrixV();
0265       m_singularValues = jsvd.singularValues();
0266       m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
0267     }
0268     return *this;
0269   }
0270   
0271   //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
0272   RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
0273   if (!(numext::isfinite)(scale)) {
0274     m_isInitialized = true;
0275     m_info = InvalidInput;
0276     return *this;
0277   }
0278 
0279   if(scale==Literal(0)) scale = Literal(1);
0280   MatrixX copy;
0281   if (m_isTranspose) copy = matrix.adjoint()/scale;
0282   else               copy = matrix/scale;
0283   
0284   //**** step 1 - Bidiagonalization
0285   // FIXME this line involves temporaries
0286   internal::UpperBidiagonalization<MatrixX> bid(copy);
0287 
0288   //**** step 2 - Divide & Conquer
0289   m_naiveU.setZero();
0290   m_naiveV.setZero();
0291   // FIXME this line involves a temporary matrix
0292   m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
0293   m_computed.template bottomRows<1>().setZero();
0294   divide(0, m_diagSize - 1, 0, 0, 0);
0295   if (m_info != Success && m_info != NoConvergence) {
0296     m_isInitialized = true;
0297     return *this;
0298   }
0299     
0300   //**** step 3 - Copy singular values and vectors
0301   for (int i=0; i<m_diagSize; i++)
0302   {
0303     RealScalar a = abs(m_computed.coeff(i, i));
0304     m_singularValues.coeffRef(i) = a * scale;
0305     if (a<considerZero)
0306     {
0307       m_nonzeroSingularValues = i;
0308       m_singularValues.tail(m_diagSize - i - 1).setZero();
0309       break;
0310     }
0311     else if (i == m_diagSize - 1)
0312     {
0313       m_nonzeroSingularValues = i + 1;
0314       break;
0315     }
0316   }
0317 
0318 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0319 //   std::cout << "m_naiveU\n" << m_naiveU << "\n\n";
0320 //   std::cout << "m_naiveV\n" << m_naiveV << "\n\n";
0321 #endif
0322   if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
0323   else              copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);
0324 
0325   m_isInitialized = true;
0326   return *this;
0327 }// end compute
0328 
0329 
0330 template<typename MatrixType>
0331 template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
0332 void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
0333 {
0334   // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
0335   if (computeU())
0336   {
0337     Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
0338     m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
0339     m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
0340     householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
0341   }
0342   if (computeV())
0343   {
0344     Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
0345     m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
0346     m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
0347     householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
0348   }
0349 }
0350 
0351 /** \internal
0352   * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
0353   *  A = [A1]
0354   *      [A2]
0355   * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.
0356   * We can thus pack them prior to the the matrix product. However, this is only worth the effort if the matrix is large
0357   * enough.
0358   */
0359 template<typename MatrixType>
0360 void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
0361 {
0362   Index n = A.rows();
0363   if(n>100)
0364   {
0365     // If the matrices are large enough, let's exploit the sparse structure of A by
0366     // splitting it in half (wrt n1), and packing the non-zero columns.
0367     Index n2 = n - n1;
0368     Map<MatrixXr> A1(m_workspace.data()      , n1, n);
0369     Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
0370     Map<MatrixXr> B1(m_workspace.data()+  n*n, n,  n);
0371     Map<MatrixXr> B2(m_workspace.data()+2*n*n, n,  n);
0372     Index k1=0, k2=0;
0373     for(Index j=0; j<n; ++j)
0374     {
0375       if( (A.col(j).head(n1).array()!=Literal(0)).any() )
0376       {
0377         A1.col(k1) = A.col(j).head(n1);
0378         B1.row(k1) = B.row(j);
0379         ++k1;
0380       }
0381       if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
0382       {
0383         A2.col(k2) = A.col(j).tail(n2);
0384         B2.row(k2) = B.row(j);
0385         ++k2;
0386       }
0387     }
0388   
0389     A.topRows(n1).noalias()    = A1.leftCols(k1) * B1.topRows(k1);
0390     A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
0391   }
0392   else
0393   {
0394     Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
0395     tmp.noalias() = A*B;
0396     A = tmp;
0397   }
0398 }
0399 
0400 // The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the 
0401 // place of the submatrix we are currently working on.
0402 
0403 //@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
0404 //@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; 
0405 // lastCol + 1 - firstCol is the size of the submatrix.
0406 //@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
0407 //@param firstRowW : Same as firstRowW with the column.
0408 //@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix 
0409 // to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.
0410 template<typename MatrixType>
0411 void BDCSVD<MatrixType>::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
0412 {
0413   // requires rows = cols + 1;
0414   using std::pow;
0415   using std::sqrt;
0416   using std::abs;
0417   const Index n = lastCol - firstCol + 1;
0418   const Index k = n/2;
0419   const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
0420   RealScalar alphaK;
0421   RealScalar betaK; 
0422   RealScalar r0; 
0423   RealScalar lambda, phi, c0, s0;
0424   VectorType l, f;
0425   // We use the other algorithm which is more efficient for small 
0426   // matrices.
0427   if (n < m_algoswap)
0428   {
0429     // FIXME this line involves temporaries
0430     JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
0431     m_info = b.info();
0432     if (m_info != Success && m_info != NoConvergence) return;
0433     if (m_compU)
0434       m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
0435     else 
0436     {
0437       m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
0438       m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
0439     }
0440     if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
0441     m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
0442     m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
0443     return;
0444   }
0445   // We use the divide and conquer algorithm
0446   alphaK =  m_computed(firstCol + k, firstCol + k);
0447   betaK = m_computed(firstCol + k + 1, firstCol + k);
0448   // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices
0449   // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the 
0450   // right submatrix before the left one. 
0451   divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
0452   if (m_info != Success && m_info != NoConvergence) return;
0453   divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
0454   if (m_info != Success && m_info != NoConvergence) return;
0455 
0456   if (m_compU)
0457   {
0458     lambda = m_naiveU(firstCol + k, firstCol + k);
0459     phi = m_naiveU(firstCol + k + 1, lastCol + 1);
0460   } 
0461   else 
0462   {
0463     lambda = m_naiveU(1, firstCol + k);
0464     phi = m_naiveU(0, lastCol + 1);
0465   }
0466   r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
0467   if (m_compU)
0468   {
0469     l = m_naiveU.row(firstCol + k).segment(firstCol, k);
0470     f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
0471   } 
0472   else 
0473   {
0474     l = m_naiveU.row(1).segment(firstCol, k);
0475     f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
0476   }
0477   if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
0478   if (r0<considerZero)
0479   {
0480     c0 = Literal(1);
0481     s0 = Literal(0);
0482   }
0483   else
0484   {
0485     c0 = alphaK * lambda / r0;
0486     s0 = betaK * phi / r0;
0487   }
0488   
0489 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0490   assert(m_naiveU.allFinite());
0491   assert(m_naiveV.allFinite());
0492   assert(m_computed.allFinite());
0493 #endif
0494   
0495   if (m_compU)
0496   {
0497     MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));     
0498     // we shiftW Q1 to the right
0499     for (Index i = firstCol + k - 1; i >= firstCol; i--) 
0500       m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
0501     // we shift q1 at the left with a factor c0
0502     m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
0503     // last column = q1 * - s0
0504     m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
0505     // first column = q2 * s0
0506     m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0; 
0507     // q2 *= c0
0508     m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
0509   } 
0510   else 
0511   {
0512     RealScalar q1 = m_naiveU(0, firstCol + k);
0513     // we shift Q1 to the right
0514     for (Index i = firstCol + k - 1; i >= firstCol; i--) 
0515       m_naiveU(0, i + 1) = m_naiveU(0, i);
0516     // we shift q1 at the left with a factor c0
0517     m_naiveU(0, firstCol) = (q1 * c0);
0518     // last column = q1 * - s0
0519     m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
0520     // first column = q2 * s0
0521     m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; 
0522     // q2 *= c0
0523     m_naiveU(1, lastCol + 1) *= c0;
0524     m_naiveU.row(1).segment(firstCol + 1, k).setZero();
0525     m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
0526   }
0527   
0528 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0529   assert(m_naiveU.allFinite());
0530   assert(m_naiveV.allFinite());
0531   assert(m_computed.allFinite());
0532 #endif
0533   
0534   m_computed(firstCol + shift, firstCol + shift) = r0;
0535   m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
0536   m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();
0537 
0538 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0539   ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
0540 #endif
0541   // Second part: try to deflate singular values in combined matrix
0542   deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
0543 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0544   ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
0545   std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
0546   std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
0547   std::cout << "err:      " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
0548   static int count = 0;
0549   std::cout << "# " << ++count << "\n\n";
0550   assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
0551 //   assert(count<681);
0552 //   assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
0553 #endif
0554   
0555   // Third part: compute SVD of combined matrix
0556   MatrixXr UofSVD, VofSVD;
0557   VectorType singVals;
0558   computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
0559   
0560 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0561   assert(UofSVD.allFinite());
0562   assert(VofSVD.allFinite());
0563 #endif
0564   
0565   if (m_compU)
0566     structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
0567   else
0568   {
0569     Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
0570     tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
0571     m_naiveU.middleCols(firstCol, n + 1) = tmp;
0572   }
0573   
0574   if (m_compV)  structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);
0575   
0576 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0577   assert(m_naiveU.allFinite());
0578   assert(m_naiveV.allFinite());
0579   assert(m_computed.allFinite());
0580 #endif
0581   
0582   m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
0583   m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
0584 }// end divide
0585 
0586 // Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
0587 // the first column and on the diagonal and has undergone deflation, so diagonal is in increasing
0588 // order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except
0589 // that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
0590 //
0591 // TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
0592 // handling of round-off errors, be consistent in ordering
0593 // For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
0594 template <typename MatrixType>
0595 void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
0596 {
0597   const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
0598   using std::abs;
0599   ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
0600   m_workspace.head(n) =  m_computed.block(firstCol, firstCol, n, n).diagonal();
0601   ArrayRef diag = m_workspace.head(n);
0602   diag(0) = Literal(0);
0603 
0604   // Allocate space for singular values and vectors
0605   singVals.resize(n);
0606   U.resize(n+1, n+1);
0607   if (m_compV) V.resize(n, n);
0608 
0609 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0610   if (col0.hasNaN() || diag.hasNaN())
0611     std::cout << "\n\nHAS NAN\n\n";
0612 #endif
0613   
0614   // Many singular values might have been deflated, the zero ones have been moved to the end,
0615   // but others are interleaved and we must ignore them at this stage.
0616   // To this end, let's compute a permutation skipping them:
0617   Index actual_n = n;
0618   while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); }
0619   Index m = 0; // size of the deflated problem
0620   for(Index k=0;k<actual_n;++k)
0621     if(abs(col0(k))>considerZero)
0622       m_workspaceI(m++) = k;
0623   Map<ArrayXi> perm(m_workspaceI.data(),m);
0624   
0625   Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
0626   Map<ArrayXr> mus(m_workspace.data()+2*n, n);
0627   Map<ArrayXr> zhat(m_workspace.data()+3*n, n);
0628 
0629 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0630   std::cout << "computeSVDofM using:\n";
0631   std::cout << "  z: " << col0.transpose() << "\n";
0632   std::cout << "  d: " << diag.transpose() << "\n";
0633 #endif
0634   
0635   // Compute singVals, shifts, and mus
0636   computeSingVals(col0, diag, perm, singVals, shifts, mus);
0637   
0638 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0639   std::cout << "  j:        " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
0640   std::cout << "  sing-val: " << singVals.transpose() << "\n";
0641   std::cout << "  mu:       " << mus.transpose() << "\n";
0642   std::cout << "  shift:    " << shifts.transpose() << "\n";
0643   
0644   {
0645     std::cout << "\n\n    mus:    " << mus.head(actual_n).transpose() << "\n\n";
0646     std::cout << "    check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
0647     assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all());
0648     std::cout << "    check2 (>0)      : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
0649     assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all());
0650   }
0651 #endif
0652   
0653 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0654   assert(singVals.allFinite());
0655   assert(mus.allFinite());
0656   assert(shifts.allFinite());
0657 #endif
0658   
0659   // Compute zhat
0660   perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
0661 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
0662   std::cout << "  zhat: " << zhat.transpose() << "\n";
0663 #endif
0664   
0665 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0666   assert(zhat.allFinite());
0667 #endif
0668   
0669   computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);
0670   
0671 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
0672   std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
0673   std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
0674 #endif
0675   
0676 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0677   assert(m_naiveU.allFinite());
0678   assert(m_naiveV.allFinite());
0679   assert(m_computed.allFinite());
0680   assert(U.allFinite());
0681   assert(V.allFinite());
0682 //   assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
0683 //   assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
0684 #endif
0685   
0686   // Because of deflation, the singular values might not be completely sorted.
0687   // Fortunately, reordering them is a O(n) problem
0688   for(Index i=0; i<actual_n-1; ++i)
0689   {
0690     if(singVals(i)>singVals(i+1))
0691     {
0692       using std::swap;
0693       swap(singVals(i),singVals(i+1));
0694       U.col(i).swap(U.col(i+1));
0695       if(m_compV) V.col(i).swap(V.col(i+1));
0696     }
0697   }
0698 
0699 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0700   {
0701     bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all();
0702     if(!singular_values_sorted)
0703       std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n";
0704     assert(singular_values_sorted);
0705   }
0706 #endif
0707   
0708   // Reverse order so that singular values in increased order
0709   // Because of deflation, the zeros singular-values are already at the end
0710   singVals.head(actual_n).reverseInPlace();
0711   U.leftCols(actual_n).rowwise().reverseInPlace();
0712   if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();
0713   
0714 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0715   JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
0716   std::cout << "  * j:        " << jsvd.singularValues().transpose() << "\n\n";
0717   std::cout << "  * sing-val: " << singVals.transpose() << "\n";
0718 //   std::cout << "  * err:      " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
0719 #endif
0720 }
0721 
0722 template <typename MatrixType>
0723 typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
0724 {
0725   Index m = perm.size();
0726   RealScalar res = Literal(1);
0727   for(Index i=0; i<m; ++i)
0728   {
0729     Index j = perm(i);
0730     // The following expression could be rewritten to involve only a single division,
0731     // but this would make the expression more sensitive to overflow.
0732     res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
0733   }
0734   return res;
0735 
0736 }
0737 
0738 template <typename MatrixType>
0739 void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
0740                                          VectorType& singVals, ArrayRef shifts, ArrayRef mus)
0741 {
0742   using std::abs;
0743   using std::swap;
0744   using std::sqrt;
0745 
0746   Index n = col0.size();
0747   Index actual_n = n;
0748   // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
0749   // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
0750   while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
0751 
0752   for (Index k = 0; k < n; ++k)
0753   {
0754     if (col0(k) == Literal(0) || actual_n==1)
0755     {
0756       // if col0(k) == 0, then entry is deflated, so singular value is on diagonal
0757       // if actual_n==1, then the deflated problem is already diagonalized
0758       singVals(k) = k==0 ? col0(0) : diag(k);
0759       mus(k) = Literal(0);
0760       shifts(k) = k==0 ? col0(0) : diag(k);
0761       continue;
0762     } 
0763 
0764     // otherwise, use secular equation to find singular value
0765     RealScalar left = diag(k);
0766     RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
0767     if(k==actual_n-1)
0768       right = (diag(actual_n-1) + col0.matrix().norm());
0769     else
0770     {
0771       // Skip deflated singular values,
0772       // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
0773       // This should be equivalent to using perm[]
0774       Index l = k+1;
0775       while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
0776       right = diag(l);
0777     }
0778 
0779     // first decide whether it's closer to the left end or the right end
0780     RealScalar mid = left + (right-left) / Literal(2);
0781     RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
0782 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
0783     std::cout << "right-left = " << right-left << "\n";
0784 //     std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, ArrayXr(diag-left), left)
0785 //                            << " " << secularEq(mid-right, col0, diag, perm, ArrayXr(diag-right), right)   << "\n";
0786     std::cout << "     = " << secularEq(left+RealScalar(0.000001)*(right-left), col0, diag, perm, diag, 0)
0787               << " "       << secularEq(left+RealScalar(0.1)     *(right-left), col0, diag, perm, diag, 0)
0788               << " "       << secularEq(left+RealScalar(0.2)     *(right-left), col0, diag, perm, diag, 0)
0789               << " "       << secularEq(left+RealScalar(0.3)     *(right-left), col0, diag, perm, diag, 0)
0790               << " "       << secularEq(left+RealScalar(0.4)     *(right-left), col0, diag, perm, diag, 0)
0791               << " "       << secularEq(left+RealScalar(0.49)    *(right-left), col0, diag, perm, diag, 0)
0792               << " "       << secularEq(left+RealScalar(0.5)     *(right-left), col0, diag, perm, diag, 0)
0793               << " "       << secularEq(left+RealScalar(0.51)    *(right-left), col0, diag, perm, diag, 0)
0794               << " "       << secularEq(left+RealScalar(0.6)     *(right-left), col0, diag, perm, diag, 0)
0795               << " "       << secularEq(left+RealScalar(0.7)     *(right-left), col0, diag, perm, diag, 0)
0796               << " "       << secularEq(left+RealScalar(0.8)     *(right-left), col0, diag, perm, diag, 0)
0797               << " "       << secularEq(left+RealScalar(0.9)     *(right-left), col0, diag, perm, diag, 0)
0798               << " "       << secularEq(left+RealScalar(0.999999)*(right-left), col0, diag, perm, diag, 0) << "\n";
0799 #endif
0800     RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
0801     
0802     // measure everything relative to shift
0803     Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
0804     diagShifted = diag - shift;
0805 
0806     if(k!=actual_n-1)
0807     {
0808       // check that after the shift, f(mid) is still negative:
0809       RealScalar midShifted = (right - left) / RealScalar(2);
0810       if(shift==right)
0811         midShifted = -midShifted;
0812       RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
0813       if(fMidShifted>0)
0814       {
0815         // fMid was erroneous, fix it:
0816         shift =  fMidShifted > Literal(0) ? left : right;
0817         diagShifted = diag - shift;
0818       }
0819     }
0820     
0821     // initial guess
0822     RealScalar muPrev, muCur;
0823     if (shift == left)
0824     {
0825       muPrev = (right - left) * RealScalar(0.1);
0826       if (k == actual_n-1) muCur = right - left;
0827       else                 muCur = (right - left) * RealScalar(0.5);
0828     }
0829     else
0830     {
0831       muPrev = -(right - left) * RealScalar(0.1);
0832       muCur = -(right - left) * RealScalar(0.5);
0833     }
0834 
0835     RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
0836     RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);
0837     if (abs(fPrev) < abs(fCur))
0838     {
0839       swap(fPrev, fCur);
0840       swap(muPrev, muCur);
0841     }
0842 
0843     // rational interpolation: fit a function of the form a / mu + b through the two previous
0844     // iterates and use its zero to compute the next iterate
0845     bool useBisection = fPrev*fCur>Literal(0);
0846     while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
0847     {
0848       ++m_numIters;
0849 
0850       // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
0851       RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
0852       RealScalar b = fCur - a / muCur;
0853       // And find mu such that f(mu)==0:
0854       RealScalar muZero = -a/b;
0855       RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
0856 
0857 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0858       assert((numext::isfinite)(fZero));
0859 #endif
0860       
0861       muPrev = muCur;
0862       fPrev = fCur;
0863       muCur = muZero;
0864       fCur = fZero;
0865       
0866       if (shift == left  && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
0867       if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
0868       if (abs(fCur)>abs(fPrev)) useBisection = true;
0869     }
0870 
0871     // fall back on bisection method if rational interpolation did not work
0872     if (useBisection)
0873     {
0874 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
0875       std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
0876 #endif
0877       RealScalar leftShifted, rightShifted;
0878       if (shift == left)
0879       {
0880         // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
0881         // the factor 2 is to be more conservative
0882         leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
0883 
0884         // check that we did it right:
0885         eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
0886         // I don't understand why the case k==0 would be special there:
0887         // if (k == 0) rightShifted = right - left; else
0888         rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
0889       }
0890       else
0891       {
0892         leftShifted = -(right - left) * RealScalar(0.51);
0893         if(k+1<n)
0894           rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
0895         else
0896           rightShifted = -(std::numeric_limits<RealScalar>::min)();
0897       }
0898 
0899       RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
0900       eigen_internal_assert(fLeft<Literal(0));
0901 
0902 #if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_SANITY_CHECKS
0903       RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
0904 #endif
0905 
0906 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0907       if(!(numext::isfinite)(fLeft))
0908         std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n";
0909       assert((numext::isfinite)(fLeft));
0910 
0911       if(!(numext::isfinite)(fRight))
0912         std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n";
0913       // assert((numext::isfinite)(fRight));
0914 #endif
0915     
0916 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
0917       if(!(fLeft * fRight<0))
0918       {
0919         std::cout << "f(leftShifted) using  leftShifted=" << leftShifted << " ;  diagShifted(1:10):" << diagShifted.head(10).transpose()  << "\n ; "
0920                   << "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n";
0921         std::cout << "k=" << k << ", " <<  fLeft << " * " << fRight << " == " << fLeft * fRight << "  ;  "
0922                   << "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift
0923                   << " ,  f(right)=" << secularEq(0,     col0, diag, perm, diagShifted, shift)
0924                            << " == " << secularEq(right, col0, diag, perm, diag, 0) << " == " << fRight << "\n";
0925       }
0926 #endif
0927       eigen_internal_assert(fLeft * fRight < Literal(0));
0928 
0929       if(fLeft<Literal(0))
0930       {
0931         while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
0932         {
0933           RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
0934           fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
0935           eigen_internal_assert((numext::isfinite)(fMid));
0936 
0937           if (fLeft * fMid < Literal(0))
0938           {
0939             rightShifted = midShifted;
0940           }
0941           else
0942           {
0943             leftShifted = midShifted;
0944             fLeft = fMid;
0945           }
0946         }
0947         muCur = (leftShifted + rightShifted) / Literal(2);
0948       }
0949       else 
0950       {
0951         // We have a problem as shifting on the left or right give either a positive or negative value
0952         // at the middle of [left,right]...
0953         // Instead fo abbording or entering an infinite loop,
0954         // let's just use the middle as the estimated zero-crossing:
0955         muCur = (right - left) * RealScalar(0.5);
0956         if(shift == right)
0957           muCur = -muCur;
0958       }
0959     }
0960       
0961     singVals[k] = shift + muCur;
0962     shifts[k] = shift;
0963     mus[k] = muCur;
0964 
0965 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
0966     if(k+1<n)
0967       std::cout << "found " << singVals[k] << " == " << shift << " + " << muCur << " from " << diag(k) << " .. "  << diag(k+1) << "\n";
0968 #endif
0969 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
0970     assert(k==0 || singVals[k]>=singVals[k-1]);
0971     assert(singVals[k]>=diag(k));
0972 #endif
0973 
0974     // perturb singular value slightly if it equals diagonal entry to avoid division by zero later
0975     // (deflation is supposed to avoid this from happening)
0976     // - this does no seem to be necessary anymore -
0977 //     if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
0978 //     if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
0979   }
0980 }
0981 
0982 
0983 // zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
0984 template <typename MatrixType>
0985 void BDCSVD<MatrixType>::perturbCol0
0986    (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
0987     const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
0988 {
0989   using std::sqrt;
0990   Index n = col0.size();
0991   Index m = perm.size();
0992   if(m==0)
0993   {
0994     zhat.setZero();
0995     return;
0996   }
0997   Index lastIdx = perm(m-1);
0998   // The offset permits to skip deflated entries while computing zhat
0999   for (Index k = 0; k < n; ++k)
1000   {
1001     if (col0(k) == Literal(0)) // deflated
1002       zhat(k) = Literal(0);
1003     else
1004     {
1005       // see equation (3.6)
1006       RealScalar dk = diag(k);
1007       RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk));
1008 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1009       if(prod<0) {
1010         std::cout << "k = " << k << " ;  z(k)=" << col0(k) << ", diag(k)=" << dk << "\n";
1011         std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n";
1012         std::cout << "     = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) <<  "\n";
1013       }
1014       assert(prod>=0);
1015 #endif
1016 
1017       for(Index l = 0; l<m; ++l)
1018       {
1019         Index i = perm(l);
1020         if(i!=k)
1021         {
1022 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1023           if(i>=k && (l==0 || l-1>=m))
1024           {
1025             std::cout << "Error in perturbCol0\n";
1026             std::cout << "  " << k << "/" << n << " "  << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " "  <<  "\n";
1027             std::cout << "  " <<diag(i) << "\n";
1028             Index j = (i<k /*|| l==0*/) ? i : perm(l-1);
1029             std::cout << "  " << "j=" << j << "\n";
1030           }
1031 #endif
1032           Index j = i<k ? i : perm(l-1);
1033 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1034           if(!(dk!=Literal(0) || diag(i)!=Literal(0)))
1035           {
1036             std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n";
1037           }
1038           assert(dk!=Literal(0) || diag(i)!=Literal(0));
1039 #endif
1040           prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
1041 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1042           assert(prod>=0);
1043 #endif
1044 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1045           if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
1046             std::cout << "     " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
1047                        << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
1048 #endif
1049         }
1050       }
1051 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1052       std::cout << "zhat(" << k << ") =  sqrt( " << prod << ")  ;  " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n";
1053 #endif
1054       RealScalar tmp = sqrt(prod);
1055 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1056       assert((numext::isfinite)(tmp));
1057 #endif
1058       zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
1059     }
1060   }
1061 }
1062 
1063 // compute singular vectors
1064 template <typename MatrixType>
1065 void BDCSVD<MatrixType>::computeSingVecs
1066    (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
1067     const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)
1068 {
1069   Index n = zhat.size();
1070   Index m = perm.size();
1071   
1072   for (Index k = 0; k < n; ++k)
1073   {
1074     if (zhat(k) == Literal(0))
1075     {
1076       U.col(k) = VectorType::Unit(n+1, k);
1077       if (m_compV) V.col(k) = VectorType::Unit(n, k);
1078     }
1079     else
1080     {
1081       U.col(k).setZero();
1082       for(Index l=0;l<m;++l)
1083       {
1084         Index i = perm(l);
1085         U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
1086       }
1087       U(n,k) = Literal(0);
1088       U.col(k).normalize();
1089     
1090       if (m_compV)
1091       {
1092         V.col(k).setZero();
1093         for(Index l=1;l<m;++l)
1094         {
1095           Index i = perm(l);
1096           V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
1097         }
1098         V(0,k) = Literal(-1);
1099         V.col(k).normalize();
1100       }
1101     }
1102   }
1103   U.col(n) = VectorType::Unit(n+1, n);
1104 }
1105 
1106 
1107 // page 12_13
1108 // i >= 1, di almost null and zi non null.
1109 // We use a rotation to zero out zi applied to the left of M
1110 template <typename MatrixType>
1111 void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
1112 {
1113   using std::abs;
1114   using std::sqrt;
1115   using std::pow;
1116   Index start = firstCol + shift;
1117   RealScalar c = m_computed(start, start);
1118   RealScalar s = m_computed(start+i, start);
1119   RealScalar r = numext::hypot(c,s);
1120   if (r == Literal(0))
1121   {
1122     m_computed(start+i, start+i) = Literal(0);
1123     return;
1124   }
1125   m_computed(start,start) = r;  
1126   m_computed(start+i, start) = Literal(0);
1127   m_computed(start+i, start+i) = Literal(0);
1128   
1129   JacobiRotation<RealScalar> J(c/r,-s/r);
1130   if (m_compU)  m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
1131   else          m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);
1132 }// end deflation 43
1133 
1134 
1135 // page 13
1136 // i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)
1137 // We apply two rotations to have zj = 0;
1138 // TODO deflation44 is still broken and not properly tested
1139 template <typename MatrixType>
1140 void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
1141 {
1142   using std::abs;
1143   using std::sqrt;
1144   using std::conj;
1145   using std::pow;
1146   RealScalar c = m_computed(firstColm+i, firstColm);
1147   RealScalar s = m_computed(firstColm+j, firstColm);
1148   RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
1149 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
1150   std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; "
1151     << m_computed(firstColm + i-1, firstColm)  << " "
1152     << m_computed(firstColm + i, firstColm)  << " "
1153     << m_computed(firstColm + i+1, firstColm) << " "
1154     << m_computed(firstColm + i+2, firstColm) << "\n";
1155   std::cout << m_computed(firstColm + i-1, firstColm + i-1)  << " "
1156     << m_computed(firstColm + i, firstColm+i)  << " "
1157     << m_computed(firstColm + i+1, firstColm+i+1) << " "
1158     << m_computed(firstColm + i+2, firstColm+i+2) << "\n";
1159 #endif
1160   if (r==Literal(0))
1161   {
1162     m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
1163     return;
1164   }
1165   c/=r;
1166   s/=r;
1167   m_computed(firstColm + i, firstColm) = r;
1168   m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
1169   m_computed(firstColm + j, firstColm) = Literal(0);
1170 
1171   JacobiRotation<RealScalar> J(c,-s);
1172   if (m_compU)  m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
1173   else          m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);
1174   if (m_compV)  m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);
1175 }// end deflation 44
1176 
1177 
1178 // acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
1179 template <typename MatrixType>
1180 void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
1181 {
1182   using std::sqrt;
1183   using std::abs;
1184   const Index length = lastCol + 1 - firstCol;
1185   
1186   Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
1187   Diagonal<MatrixXr> fulldiag(m_computed);
1188   VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);
1189   
1190   const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
1191   RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
1192   RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
1193   RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
1194   
1195 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1196   assert(m_naiveU.allFinite());
1197   assert(m_naiveV.allFinite());
1198   assert(m_computed.allFinite());
1199 #endif
1200 
1201 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE  
1202   std::cout << "\ndeflate:" << diag.head(k+1).transpose() << "  |  " << diag.segment(k+1,length-k-1).transpose() << "\n";
1203 #endif
1204   
1205   //condition 4.1
1206   if (diag(0) < epsilon_coarse)
1207   { 
1208 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
1209     std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n";
1210 #endif
1211     diag(0) = epsilon_coarse;
1212   }
1213 
1214   //condition 4.2
1215   for (Index i=1;i<length;++i)
1216     if (abs(col0(i)) < epsilon_strict)
1217     {
1218 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
1219       std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << "  (diag(" << i << ")=" << diag(i) << ")\n";
1220 #endif
1221       col0(i) = Literal(0);
1222     }
1223 
1224   //condition 4.3
1225   for (Index i=1;i<length; i++)
1226     if (diag(i) < epsilon_coarse)
1227     {
1228 #ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE
1229       std::cout << "deflation 4.3, cancel z(" << i << ")=" << col0(i) << " because diag(" << i << ")=" << diag(i) << " < " << epsilon_coarse << "\n";
1230 #endif
1231       deflation43(firstCol, shift, i, length);
1232     }
1233 
1234 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1235   assert(m_naiveU.allFinite());
1236   assert(m_naiveV.allFinite());
1237   assert(m_computed.allFinite());
1238 #endif
1239 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1240   std::cout << "to be sorted: " << diag.transpose() << "\n\n";
1241   std::cout << "            : " << col0.transpose() << "\n\n";
1242 #endif
1243   {
1244     // Check for total deflation
1245     // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting
1246     bool total_deflation = (col0.tail(length-1).array()<considerZero).all();
1247     
1248     // Sort the diagonal entries, since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.
1249     // First, compute the respective permutation.
1250     Index *permutation = m_workspaceI.data();
1251     {
1252       permutation[0] = 0;
1253       Index p = 1;
1254       
1255       // Move deflated diagonal entries at the end.
1256       for(Index i=1; i<length; ++i)
1257         if(abs(diag(i))<considerZero)
1258           permutation[p++] = i;
1259         
1260       Index i=1, j=k+1;
1261       for( ; p < length; ++p)
1262       {
1263              if (i > k)             permutation[p] = j++;
1264         else if (j >= length)       permutation[p] = i++;
1265         else if (diag(i) < diag(j)) permutation[p] = j++;
1266         else                        permutation[p] = i++;
1267       }
1268     }
1269     
1270     // If we have a total deflation, then we have to insert diag(0) at the right place
1271     if(total_deflation)
1272     {
1273       for(Index i=1; i<length; ++i)
1274       {
1275         Index pi = permutation[i];
1276         if(abs(diag(pi))<considerZero || diag(0)<diag(pi))
1277           permutation[i-1] = permutation[i];
1278         else
1279         {
1280           permutation[i-1] = 0;
1281           break;
1282         }
1283       }
1284     }
1285     
1286     // Current index of each col, and current column of each index
1287     Index *realInd = m_workspaceI.data()+length;
1288     Index *realCol = m_workspaceI.data()+2*length;
1289     
1290     for(int pos = 0; pos< length; pos++)
1291     {
1292       realCol[pos] = pos;
1293       realInd[pos] = pos;
1294     }
1295     
1296     for(Index i = total_deflation?0:1; i < length; i++)
1297     {
1298       const Index pi = permutation[length - (total_deflation ? i+1 : i)];
1299       const Index J = realCol[pi];
1300       
1301       using std::swap;
1302       // swap diagonal and first column entries:
1303       swap(diag(i), diag(J));
1304       if(i!=0 && J!=0) swap(col0(i), col0(J));
1305 
1306       // change columns
1307       if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));
1308       else         m_naiveU.col(firstCol+i).segment(0, 2)                .swap(m_naiveU.col(firstCol+J).segment(0, 2));
1309       if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));
1310 
1311       //update real pos
1312       const Index realI = realInd[i];
1313       realCol[realI] = J;
1314       realCol[pi] = i;
1315       realInd[J] = realI;
1316       realInd[i] = pi;
1317     }
1318   }
1319 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1320   std::cout << "sorted: " << diag.transpose().format(bdcsvdfmt) << "\n";
1321   std::cout << "      : " << col0.transpose() << "\n\n";
1322 #endif
1323     
1324   //condition 4.4
1325   {
1326     Index i = length-1;
1327     while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;
1328     for(; i>1;--i)
1329        if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
1330       {
1331 #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1332         std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*/*diag(i)*/maxDiag << "\n";
1333 #endif
1334         eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
1335         deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
1336       }
1337   }
1338   
1339 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1340   for(Index j=2;j<length;++j)
1341     assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);
1342 #endif
1343   
1344 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1345   assert(m_naiveU.allFinite());
1346   assert(m_naiveV.allFinite());
1347   assert(m_computed.allFinite());
1348 #endif
1349 }//end deflation
1350 
1351 /** \svd_module
1352   *
1353   * \return the singular value decomposition of \c *this computed by Divide & Conquer algorithm
1354   *
1355   * \sa class BDCSVD
1356   */
1357 template<typename Derived>
1358 BDCSVD<typename MatrixBase<Derived>::PlainObject>
1359 MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
1360 {
1361   return BDCSVD<PlainObject>(*this, computationOptions);
1362 }
1363 
1364 } // end namespace Eigen
1365 
1366 #endif