// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
// research report written by Ming Gu and Stanley C. Eisenstat.
// The code variable names correspond to the names they used in their
// report.
//
// Copyright (C) 2013 Gauthier Brun
// Copyright (C) 2013 Nicolas Carre
// Copyright (C) 2013 Jean Ceccato
// Copyright (C) 2013 Pierre Zoppitelli
// Copyright (C) 2013 Jitse Niesen
// Copyright (C) 2014-2017 Gael Guennebaud
//
// Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_BDCSVD_H
#define EIGEN_BDCSVD_H
// #define EIGEN_BDCSVD_DEBUG_VERBOSE
// #define EIGEN_BDCSVD_SANITY_CHECKS

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
#undef eigen_internal_assert
#define eigen_internal_assert(X) assert(X);
#endif

namespace Eigen {

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
#endif

template<typename _MatrixType> class BDCSVD;

namespace internal {

template<typename _MatrixType>
struct traits<BDCSVD<_MatrixType> >
{
  typedef _MatrixType MatrixType;
};

} // end namespace internal


/** \ingroup SVD_Module
 *
 *
 * \class BDCSVD
 *
 * \brief class Bidiagonal Divide and Conquer SVD
 *
 * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
 *
 * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
 * and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
 * You can control the switching size with the setSwitchSize() method, default is 16.
 * For small matrices (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly
 * recommended and can be several orders of magnitude faster.
 *
 * \warning this algorithm is unlikely to provide accurate results when compiled with unsafe math optimizations.
 * For instance, this concerns Intel's compiler (ICC), which performs such optimizations by default unless
 * you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
 * significantly degrade the accuracy.
 *
 * \sa class JacobiSVD
 */
template<typename _MatrixType>
class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
{
  typedef SVDBase<BDCSVD> Base;

public:
  using Base::rows;
  using Base::cols;
  using Base::computeU;
  using Base::computeV;

  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
  typedef typename NumTraits<RealScalar>::Literal Literal;
  enum {
    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
    MatrixOptions = MatrixType::Options
  };

  typedef typename Base::MatrixUType MatrixUType;
  typedef typename Base::MatrixVType MatrixVType;
  typedef typename Base::SingularValuesType SingularValuesType;

  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
  typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
  typedef Matrix<RealScalar, Dynamic, 1> VectorType;
  typedef Array<RealScalar, Dynamic, 1> ArrayXr;
  typedef Array<Index, 1, Dynamic> ArrayXi;
  typedef Ref<ArrayXr> ArrayRef;
  typedef Ref<ArrayXi> IndicesRef;

  /** \brief Default Constructor.
   *
   * The default constructor is useful in cases in which the user intends to
   * perform decompositions via BDCSVD::compute(const MatrixType&).
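   *
   * For instance, a minimal usage sketch (the matrix \c A below is only an illustrative placeholder):
   * \code
   * Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 60);
   * Eigen::BDCSVD<Eigen::MatrixXd> svd;                          // nothing is computed yet
   * svd.compute(A, Eigen::ComputeThinU | Eigen::ComputeThinV);   // decompose A
   * Eigen::VectorXd sv = svd.singularValues();                   // sorted in decreasing order
   * \endcode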
   */
  BDCSVD() : m_algoswap(16), m_numIters(0)
  {}

  /** \brief Default Constructor with memory preallocation
   *
   * Like the default constructor but with preallocation of the internal data
   * according to the specified problem size.
   * \sa BDCSVD()
   */
  BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
    : m_algoswap(16), m_numIters(0)
  {
    allocate(rows, cols, computationOptions);
  }

  /** \brief Constructor performing the decomposition of given matrix.
   *
   * \param matrix the matrix to decompose
   * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
   *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
   *                           #ComputeFullV, #ComputeThinV.
   *
   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
   * available with the (non-default) FullPivHouseholderQR preconditioner.
   */
  BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
    : m_algoswap(16), m_numIters(0)
  {
    compute(matrix, computationOptions);
  }

  ~BDCSVD()
  {
  }

  /** \brief Method performing the decomposition of given matrix using custom options.
   *
   * \param matrix the matrix to decompose
   * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
   *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
   *                           #ComputeFullV, #ComputeThinV.
   *
   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
   * available with the (non-default) FullPivHouseholderQR preconditioner.
   */
  BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);

  /** \brief Method performing the decomposition of given matrix using current options.
   *
   * \param matrix the matrix to decompose
   *
   * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
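   *
   * A minimal sketch of the intended reuse (the matrices \c A and \c B are illustrative placeholders):
   * \code
   * Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
   * // ... later, decompose another matrix with the same computation options:
   * svd.compute(B);
   * \endcode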
   */
  BDCSVD& compute(const MatrixType& matrix)
  {
    return compute(matrix, this->m_computationOptions);
  }

  void setSwitchSize(int s)
  {
    eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
    m_algoswap = s;
  }

private:
  void allocate(Index rows, Index cols, unsigned int computationOptions);
  void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
  void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
  void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
  void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
  void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
  void deflation43(Index firstCol, Index shift, Index i, Index size);
  void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
  void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
  template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
  void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
  void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
  static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);

protected:
  MatrixXr m_naiveU, m_naiveV;
  MatrixXr m_computed;
  Index m_nRec;
  ArrayXr m_workspace;
  ArrayXi m_workspaceI;
  int m_algoswap;
  bool m_isTranspose, m_compU, m_compV;

  using Base::m_singularValues;
  using Base::m_diagSize;
  using Base::m_computeFullU;
  using Base::m_computeFullV;
  using Base::m_computeThinU;
  using Base::m_computeThinV;
  using Base::m_matrixU;
  using Base::m_matrixV;
  using Base::m_isInitialized;
  using Base::m_nonzeroSingularValues;

public:
  int m_numIters;
}; //end class BDCSVD


// Method to allocate and initialize matrix and attributes
template<typename MatrixType>
void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
  m_isTranspose = (cols > rows);

  if (Base::allocate(rows, cols, computationOptions))
    return;

  m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
  m_compU = computeV();
  m_compV = computeU();
  if (m_isTranspose)
    std::swap(m_compU, m_compV);

  if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
  else         m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );

  if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);

  m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
  m_workspaceI.resize(3*m_diagSize);
}// end allocate

template<typename MatrixType>
BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "\n\n\n======================================================================================================================\n\n\n";
#endif
  allocate(matrix.rows(), matrix.cols(), computationOptions);
  using std::abs;

  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();

  //**** step -1 - If the problem is too small, directly falls back to JacobiSVD and return
  if(matrix.cols() < m_algoswap)
  {
    // FIXME this line involves temporaries
    JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
    if(computeU()) m_matrixU = jsvd.matrixU();
    if(computeV()) m_matrixV = jsvd.matrixV();
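    // For inputs narrower than m_algoswap the divide-and-conquer machinery is bypassed entirely:
    // the JacobiSVD results computed above are copied into this object's members below, and
    // compute() returns right away.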
    m_singularValues = jsvd.singularValues();
    m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
    m_isInitialized = true;
    return *this;
  }

  //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
  RealScalar scale = matrix.cwiseAbs().maxCoeff();
  if(scale==Literal(0)) scale = Literal(1);
  MatrixX copy;
  if (m_isTranspose) copy = matrix.adjoint()/scale;
  else               copy = matrix/scale;

  //**** step 1 - Bidiagonalization
  // FIXME this line involves temporaries
  internal::UpperBidiagonalization<MatrixX> bid(copy);

  //**** step 2 - Divide & Conquer
  m_naiveU.setZero();
  m_naiveV.setZero();
  // FIXME this line involves a temporary matrix
  m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
  m_computed.template bottomRows<1>().setZero();
  divide(0, m_diagSize - 1, 0, 0, 0);

  //**** step 3 - Copy singular values and vectors
  for (int i=0; i<m_diagSize; i++)
  {
    RealScalar a = abs(m_computed.coeff(i, i));
    m_singularValues.coeffRef(i) = a * scale;
    if (a<considerZero)
    {
      m_nonzeroSingularValues = i;
      m_singularValues.tail(m_diagSize - i - 1).setZero();
      break;
    }
    else if (i == m_diagSize - 1)
    {
      m_nonzeroSingularValues = i + 1;
      break;
    }
  }

  if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
  else              copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);

  m_isInitialized = true;
  return *this;
}// end compute


template<typename MatrixType>
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
{
  // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
  if (computeU())
  {
    Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
    m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
    m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
    householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
  }
  if (computeV())
  {
    Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
    m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
    m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
    householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
  }
}

/** \internal
  * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
  *  A = [A1]
  *      [A2]
  * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.
  * We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
  * enough.
  */
template<typename MatrixType>
void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
{
  Index n = A.rows();
  if(n>100)
  {
    // If the matrices are large enough, let's exploit the sparse structure of A by
    // splitting it in half (wrt n1), and packing the non-zero columns.
    Index n2 = n - n1;
    Map<MatrixXr> A1(m_workspace.data()      , n1, n);
    Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
    Map<MatrixXr> B1(m_workspace.data()+  n*n, n, n);
    Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
    Index k1=0, k2=0;
    for(Index j=0; j<n; ++j)
    {
      if( (A.col(j).head(n1).array()!=Literal(0)).any() )
      {
        A1.col(k1) = A.col(j).head(n1);
        B1.row(k1) = B.row(j);
        ++k1;
      }
      if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
      {
        A2.col(k2) = A.col(j).tail(n2);
        B2.row(k2) = B.row(j);
        ++k2;
      }
    }

    A.topRows(n1).noalias()    = A1.leftCols(k1) * B1.topRows(k1);
    A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
  }
  else
  {
    Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
    tmp.noalias() = A*B;
    A = tmp;
  }
}

// The divide algorithm is done "in place": we are always working on subsets of the same matrix. The divide method takes as arguments the
// place of the submatrix we are currently working on.
//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
//                 lastCol + 1 - firstCol is the size of the submatrix.
//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
//@param firstColW : Same as firstRowW with the column.
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
//               to become the first column (*coeff) and to shift all the other columns to the right.
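//
// A rough sketch of the recursion performed below (illustrative pseudo-code only, not the exact
// implementation; the real code also handles the m_compU/m_compV flags and the debug/sanity paths):
//
//   divide(firstCol, lastCol, ...):
//     n = lastCol - firstCol + 1
//     if n < m_algoswap:                 // small block: diagonalize the (n+1) x n block with JacobiSVD
//       store its U/V parts into m_naiveU/m_naiveV and its singular values into m_computed; return
//     k = n/2
//     divide(right half)                 // done first: it reads one column of the left half
//     divide(left half, shift+1)
//     combine the two halves into a matrix that is non-zero only in its first column and on its
//     diagonal (built from r0, alphaK*l, betaK*f), deflate it, compute its SVD with computeSVDofM(),
//     and apply the resulting U/V to m_naiveU/m_naiveV via structured_update().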
There are more details on the reference paper. template void BDCSVD::divide (Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) { // requires rows = cols + 1; using std::pow; using std::sqrt; using std::abs; const Index n = lastCol - firstCol + 1; const Index k = n/2; const RealScalar considerZero = (std::numeric_limits::min)(); RealScalar alphaK; RealScalar betaK; RealScalar r0; RealScalar lambda, phi, c0, s0; VectorType l, f; // We use the other algorithm which is more efficient for small // matrices. if (n < m_algoswap) { // FIXME this line involves temporaries JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0)); if (m_compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU(); else { m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0); m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n); } if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV(); m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n); return; } // We use the divide and conquer algorithm alphaK = m_computed(firstCol + k, firstCol + k); betaK = m_computed(firstCol + k + 1, firstCol + k); // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the // right submatrix before the left one. divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift); divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1); if (m_compU) { lambda = m_naiveU(firstCol + k, firstCol + k); phi = m_naiveU(firstCol + k + 1, lastCol + 1); } else { lambda = m_naiveU(1, firstCol + k); phi = m_naiveU(0, lastCol + 1); } r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi)); if (m_compU) { l = m_naiveU.row(firstCol + k).segment(firstCol, k); f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1); } else { l = m_naiveU.row(1).segment(firstCol, k); f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1); } if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1); if (r0= firstCol; i--) m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1); // we shift q1 at the left with a factor c0 m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0); // last column = q1 * - s0 m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0)); // first column = q2 * s0 m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0; // q2 *= c0 m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; } else { RealScalar q1 = m_naiveU(0, firstCol + k); // we shift Q1 to the right for (Index i = firstCol + k - 1; i >= firstCol; i--) m_naiveU(0, i + 1) = m_naiveU(0, i); // we shift q1 at the left with a factor c0 m_naiveU(0, firstCol) = (q1 * c0); // last column = q1 * - s0 m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); // first column = q2 * s0 m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; // q2 *= c0 m_naiveU(1, lastCol + 1) *= c0; m_naiveU.row(1).segment(firstCol + 1, k).setZero(); m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); } #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert(m_naiveU.allFinite()); 
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif

  m_computed(firstCol + shift, firstCol + shift) = r0;
  m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
  m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
#endif
  // Second part: try to deflate singular values in combined matrix
  deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
  std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
  std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
  std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
  static int count = 0;
  std::cout << "# " << ++count << "\n\n";
  assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
//  assert(count<681);
//  assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
#endif

  // Third part: compute SVD of combined matrix
  MatrixXr UofSVD, VofSVD;
  VectorType singVals;
  computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(UofSVD.allFinite());
  assert(VofSVD.allFinite());
#endif

  if (m_compU)
    structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
  else
  {
    Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
    tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
    m_naiveU.middleCols(firstCol, n + 1) = tmp;
  }

  if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif

  m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
  m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
}// end divide

// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing
// order except for possibly the (0,0) entry. The computed SVD is stored in U, singVals and V, except
// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
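//
// Orientation note (added summary; see the Gu/Eisenstat report cited at the top of this file for the
// precise statement): writing the first column of that block as z and its diagonal as d (with d(0)==0),
// its singular values are the roots of the secular equation evaluated by secularEq() below,
//
//   f(sigma) = 1 + sum_i z(i)^2 / ((d(i) - sigma) * (d(i) + sigma)) = 0,
//
// and computeSingVals() locates one root in each interval between consecutive diagonal entries
// (and one above the largest entry for the last singular value).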
// // TODO Opportunities for optimization: better root finding algo, better stopping criterion, better // handling of round-off errors, be consistent in ordering // For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf template void BDCSVD::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) { const RealScalar considerZero = (std::numeric_limits::min)(); using std::abs; ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n); m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal(); ArrayRef diag = m_workspace.head(n); diag(0) = Literal(0); // Allocate space for singular values and vectors singVals.resize(n); U.resize(n+1, n+1); if (m_compV) V.resize(n, n); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE if (col0.hasNaN() || diag.hasNaN()) std::cout << "\n\nHAS NAN\n\n"; #endif // Many singular values might have been deflated, the zero ones have been moved to the end, // but others are interleaved and we must ignore them at this stage. // To this end, let's compute a permutation skipping them: Index actual_n = n; while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); } Index m = 0; // size of the deflated problem for(Index k=0;kconsiderZero) m_workspaceI(m++) = k; Map perm(m_workspaceI.data(),m); Map shifts(m_workspace.data()+1*n, n); Map mus(m_workspace.data()+2*n, n); Map zhat(m_workspace.data()+3*n, n); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "computeSVDofM using:\n"; std::cout << " z: " << col0.transpose() << "\n"; std::cout << " d: " << diag.transpose() << "\n"; #endif // Compute singVals, shifts, and mus computeSingVals(col0, diag, perm, singVals, shifts, mus); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n"; std::cout << " sing-val: " << singVals.transpose() << "\n"; std::cout << " mu: " << mus.transpose() << "\n"; std::cout << " shift: " << shifts.transpose() << "\n"; { std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n"; std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n"; assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all()); std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n"; assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all()); } #endif #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert(singVals.allFinite()); assert(mus.allFinite()); assert(shifts.allFinite()); #endif // Compute zhat perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << " zhat: " << zhat.transpose() << "\n"; #endif #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert(zhat.allFinite()); #endif computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n"; std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n"; #endif #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert(m_naiveU.allFinite()); assert(m_naiveV.allFinite()); assert(m_computed.allFinite()); assert(U.allFinite()); assert(V.allFinite()); // assert((U.transpose() * U - 
MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits::epsilon() * n); // assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits::epsilon() * n); #endif // Because of deflation, the singular values might not be completely sorted. // Fortunately, reordering them is a O(n) problem for(Index i=0; isingVals(i+1)) { using std::swap; swap(singVals(i),singVals(i+1)); U.col(i).swap(U.col(i+1)); if(m_compV) V.col(i).swap(V.col(i+1)); } } #ifdef EIGEN_BDCSVD_SANITY_CHECKS { bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all(); if(!singular_values_sorted) std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n"; assert(singular_values_sorted); } #endif // Reverse order so that singular values in increased order // Because of deflation, the zeros singular-values are already at the end singVals.head(actual_n).reverseInPlace(); U.leftCols(actual_n).rowwise().reverseInPlace(); if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace(); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE JacobiSVD jsvd(m_computed.block(firstCol, firstCol, n, n) ); std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n"; std::cout << " * sing-val: " << singVals.transpose() << "\n"; // std::cout << " * err: " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n"; #endif } template typename BDCSVD::RealScalar BDCSVD::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift) { Index m = perm.size(); RealScalar res = Literal(1); for(Index i=0; i void BDCSVD::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus) { using std::abs; using std::swap; using std::sqrt; Index n = col0.size(); Index actual_n = n; // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value. while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n; for (Index k = 0; k < n; ++k) { if (col0(k) == Literal(0) || actual_n==1) { // if col0(k) == 0, then entry is deflated, so singular value is on diagonal // if actual_n==1, then the deflated problem is already diagonalized singVals(k) = k==0 ? col0(0) : diag(k); mus(k) = Literal(0); shifts(k) = k==0 ? col0(0) : diag(k); continue; } // otherwise, use secular equation to find singular value RealScalar left = diag(k); RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm()); if(k==actual_n-1) right = (diag(actual_n-1) + col0.matrix().norm()); else { // Skip deflated singular values, // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside. // This should be equivalent to using perm[] Index l = k+1; while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l Literal(0)) ? 
left : right; // measure everything relative to shift Map diagShifted(m_workspace.data()+4*n, n); diagShifted = diag - shift; // initial guess RealScalar muPrev, muCur; if (shift == left) { muPrev = (right - left) * RealScalar(0.1); if (k == actual_n-1) muCur = right - left; else muCur = (right - left) * RealScalar(0.5); } else { muPrev = -(right - left) * RealScalar(0.1); muCur = -(right - left) * RealScalar(0.5); } RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift); RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift); if (abs(fPrev) < abs(fCur)) { swap(fPrev, fCur); swap(muPrev, muCur); } // rational interpolation: fit a function of the form a / mu + b through the two previous // iterates and use its zero to compute the next iterate bool useBisection = fPrev*fCur>Literal(0); while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits::epsilon() * numext::maxi(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits::epsilon() && !useBisection) { ++m_numIters; // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples. RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev); RealScalar b = fCur - a / muCur; // And find mu such that f(mu)==0: RealScalar muZero = -a/b; RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift); #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert((std::isfinite)(fZero)); #endif muPrev = muCur; fPrev = fCur; muCur = muZero; fCur = fZero; if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true; if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true; if (abs(fCur)>abs(fPrev)) useBisection = true; } // fall back on bisection method if rational interpolation did not work if (useBisection) { #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n"; #endif RealScalar leftShifted, rightShifted; if (shift == left) { // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)), // the factor 2 is to be more conservative leftShifted = numext::maxi( (std::numeric_limits::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits::max)()) ); // check that we did it right: eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) ); // I don't understand why the case k==0 would be special there: // if (k == 0) rightShifted = right - left; else rightShifted = (k==actual_n-1) ? 
right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe } else { leftShifted = -(right - left) * RealScalar(0.51); if(k+1( (std::numeric_limits::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits::max)()) ); else rightShifted = -(std::numeric_limits::min)(); } RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift); #if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_SANITY_CHECKS RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift); #endif #ifdef EIGEN_BDCSVD_SANITY_CHECKS if(!(std::isfinite)(fLeft)) std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n"; assert((std::isfinite)(fLeft)); if(!(std::isfinite)(fRight)) std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n"; // assert((std::isfinite)(fRight)); #endif #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE if(!(fLeft * fRight<0)) { std::cout << "f(leftShifted) using leftShifted=" << leftShifted << " ; diagShifted(1:10):" << diagShifted.head(10).transpose() << "\n ; " << "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n"; std::cout << "k=" << k << ", " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; " << "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift << " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift) << " == " << secularEq(right, col0, diag, perm, diag, 0) << "\n"; } #endif eigen_internal_assert(fLeft * fRight < Literal(0)); while (rightShifted - leftShifted > Literal(2) * NumTraits::epsilon() * numext::maxi(abs(leftShifted), abs(rightShifted))) { RealScalar midShifted = (leftShifted + rightShifted) / Literal(2); fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift); eigen_internal_assert((numext::isfinite)(fMid)); if (fLeft * fMid < Literal(0)) { rightShifted = midShifted; } else { leftShifted = midShifted; fLeft = fMid; } } muCur = (leftShifted + rightShifted) / Literal(2); } singVals[k] = shift + muCur; shifts[k] = shift; mus[k] = muCur; #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE if(k+1=singVals[k-1]); assert(singVals[k]>=diag(k)); #endif // perturb singular value slightly if it equals diagonal entry to avoid division by zero later // (deflation is supposed to avoid this from happening) // - this does no seem to be necessary anymore - // if (singVals[k] == left) singVals[k] *= 1 + NumTraits::epsilon(); // if (singVals[k] == right) singVals[k] *= 1 - NumTraits::epsilon(); } } // zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) template void BDCSVD::perturbCol0 (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat) { using std::sqrt; Index n = col0.size(); Index m = perm.size(); if(m==0) { zhat.setZero(); return; } Index last = perm(m-1); // The offset permits to skip deflated entries while computing zhat for (Index k = 0; k < n; ++k) { if (col0(k) == Literal(0)) // deflated zhat(k) = Literal(0); else { // see equation (3.6) RealScalar dk = diag(k); RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk)); #ifdef EIGEN_BDCSVD_SANITY_CHECKS if(prod<0) { std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n"; std::cout << "prod = " << "(" << singVals(last) << " + " << dk << ") * (" << mus(last) << " + (" << shifts(last) << " - 
" << dk << "))" << "\n"; std::cout << " = " << singVals(last) + dk << " * " << mus(last) + (shifts(last) - dk) << "\n"; } assert(prod>=0); #endif for(Index l = 0; l=k && (l==0 || l-1>=m)) { std::cout << "Error in perturbCol0\n"; std::cout << " " << k << "/" << n << " " << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " " << "\n"; std::cout << " " <=0); #endif #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE if(i!=k && std::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 ) std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk)) << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n"; #endif } } #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(last) + dk) << " * " << mus(last) + shifts(last) << " - " << dk << "\n"; #endif RealScalar tmp = sqrt(prod); #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert((std::isfinite)(tmp)); #endif zhat(k) = col0(k) > Literal(0) ? tmp : -tmp; } } } // compute singular vectors template void BDCSVD::computeSingVecs (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V) { Index n = zhat.size(); Index m = perm.size(); for (Index k = 0; k < n; ++k) { if (zhat(k) == Literal(0)) { U.col(k) = VectorType::Unit(n+1, k); if (m_compV) V.col(k) = VectorType::Unit(n, k); } else { U.col(k).setZero(); for(Index l=0;l= 1, di almost null and zi non null. // We use a rotation to zero out zi applied to the left of M template void BDCSVD::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size) { using std::abs; using std::sqrt; using std::pow; Index start = firstCol + shift; RealScalar c = m_computed(start, start); RealScalar s = m_computed(start+i, start); RealScalar r = numext::hypot(c,s); if (r == Literal(0)) { m_computed(start+i, start+i) = Literal(0); return; } m_computed(start,start) = r; m_computed(start+i, start) = Literal(0); m_computed(start+i, start+i) = Literal(0); JacobiRotation J(c/r,-s/r); if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J); else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J); }// end deflation 43 // page 13 // i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M) // We apply two rotations to have zj = 0; // TODO deflation44 is still broken and not properly tested template void BDCSVD::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size) { using std::abs; using std::sqrt; using std::conj; using std::pow; RealScalar c = m_computed(firstColm+i, firstColm); RealScalar s = m_computed(firstColm+j, firstColm); RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s)); #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; " << m_computed(firstColm + i-1, firstColm) << " " << m_computed(firstColm + i, firstColm) << " " << m_computed(firstColm + i+1, firstColm) << " " << m_computed(firstColm + i+2, firstColm) << "\n"; std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " " << m_computed(firstColm + i, firstColm+i) << " " << m_computed(firstColm + i+1, firstColm+i+1) << " " << m_computed(firstColm + i+2, firstColm+i+2) << "\n"; #endif if (r==Literal(0)) { m_computed(firstColm + i, 
firstColm + i) = m_computed(firstColm + j, firstColm + j); return; } c/=r; s/=r; m_computed(firstColm + i, firstColm) = r; m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i); m_computed(firstColm + j, firstColm) = Literal(0); JacobiRotation J(c,-s); if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J); else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J); if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J); }// end deflation 44 // acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] template void BDCSVD::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) { using std::sqrt; using std::abs; const Index length = lastCol + 1 - firstCol; Block col0(m_computed, firstCol+shift, firstCol+shift, length, 1); Diagonal fulldiag(m_computed); VectorBlock,Dynamic> diag(fulldiag, firstCol+shift, length); const RealScalar considerZero = (std::numeric_limits::min)(); RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff(); RealScalar epsilon_strict = numext::maxi(considerZero,NumTraits::epsilon() * maxDiag); RealScalar epsilon_coarse = Literal(8) * NumTraits::epsilon() * numext::maxi(col0.cwiseAbs().maxCoeff(), maxDiag); #ifdef EIGEN_BDCSVD_SANITY_CHECKS assert(m_naiveU.allFinite()); assert(m_naiveV.allFinite()); assert(m_computed.allFinite()); #endif #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n"; #endif //condition 4.1 if (diag(0) < epsilon_coarse) { #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n"; #endif diag(0) = epsilon_coarse; } //condition 4.2 for (Index i=1;i k) permutation[p] = j++; else if (j >= length) permutation[p] = i++; else if (diag(i) < diag(j)) permutation[p] = j++; else permutation[p] = i++; } } // If we have a total deflation, then we have to insert diag(0) at the right place if(total_deflation) { for(Index i=1; i0 && (abs(diag(i))1;--i) if( (diag(i) - diag(i-1)) < NumTraits::epsilon()*maxDiag ) { #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits::epsilon()*/*diag(i)*/maxDiag << "\n"; #endif eigen_internal_assert(abs(diag(i) - diag(i-1)) BDCSVD::PlainObject> MatrixBase::bdcSvd(unsigned int computationOptions) const { return BDCSVD(*this, computationOptions); } #endif } // end namespace Eigen #endif