0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 //
0004 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
0005 //
0006 // This Source Code Form is subject to the terms of the Mozilla
0007 // Public License v. 2.0. If a copy of the MPL was not distributed
0008 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0009 
0010 #ifndef EIGEN_SPARSEMATRIX_H
0011 #define EIGEN_SPARSEMATRIX_H
0012 
0013 namespace Eigen { 
0014 
0015 /** \ingroup SparseCore_Module
0016   *
0017   * \class SparseMatrix
0018   *
0019   * \brief A versatile sparse matrix representation
0020   *
0021   * This class implements a more versatile variant of the common \em compressed row/column storage format.
0022   * Each column's (resp. row's) non-zeros are stored as a pair of a value and its associated row (resp. column) index.
0023   * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
0024   * space in between the non-zeros of two successive columns (resp. rows) such that insertion of a new non-zero
0025   * can be done with limited memory reallocations and copies.
0026   *
0027   * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
0028   * compatible with many libraries.
0029   *
0030   * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
0031   *
0032   * \tparam _Scalar the scalar type, i.e. the type of the coefficients
0033   * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
0034   *                 is ColMajor or RowMajor. The default is 0 which means column-major.
0035   * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
0036   *
0037   * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
0038   *          whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
0039   *          Code making use of \c SparseMatrix::Index will thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
0040   *
0041   * This class can be extended with the help of the plugin mechanism described on the page
0042   * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
0043   */
0044 
0045 namespace internal {
0046 template<typename _Scalar, int _Options, typename _StorageIndex>
0047 struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
0048 {
0049   typedef _Scalar Scalar;
0050   typedef _StorageIndex StorageIndex;
0051   typedef Sparse StorageKind;
0052   typedef MatrixXpr XprKind;
0053   enum {
0054     RowsAtCompileTime = Dynamic,
0055     ColsAtCompileTime = Dynamic,
0056     MaxRowsAtCompileTime = Dynamic,
0057     MaxColsAtCompileTime = Dynamic,
0058     Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
0059     SupportedAccessPatterns = InnerRandomAccessPattern
0060   };
0061 };
0062 
0063 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
0064 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
0065 {
0066   typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
0067   typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
0068   typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
0069 
0070   typedef _Scalar Scalar;
0071   typedef Dense StorageKind;
0072   typedef _StorageIndex StorageIndex;
0073   typedef MatrixXpr XprKind;
0074 
0075   enum {
0076     RowsAtCompileTime = Dynamic,
0077     ColsAtCompileTime = 1,
0078     MaxRowsAtCompileTime = Dynamic,
0079     MaxColsAtCompileTime = 1,
0080     Flags = LvalueBit
0081   };
0082 };
0083 
0084 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
0085 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
0086  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
0087 {
0088   enum {
0089     Flags = 0
0090   };
0091 };
0092 
0093 } // end namespace internal
0094 
0095 template<typename _Scalar, int _Options, typename _StorageIndex>
0096 class SparseMatrix
0097   : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
0098 {
0099     typedef SparseCompressedBase<SparseMatrix> Base;
0100     using Base::convert_index;
0101     friend class SparseVector<_Scalar,0,_StorageIndex>;
0102     template<typename, typename, typename, typename, typename>
0103     friend struct internal::Assignment;
0104   public:
0105     using Base::isCompressed;
0106     using Base::nonZeros;
0107     EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
0108     using Base::operator+=;
0109     using Base::operator-=;
0110 
0111     typedef MappedSparseMatrix<Scalar,Flags> Map;
0112     typedef Diagonal<SparseMatrix> DiagonalReturnType;
0113     typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
0114     typedef typename Base::InnerIterator InnerIterator;
0115     typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
0116     
0117 
0118     using Base::IsRowMajor;
0119     typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
0120     enum {
0121       Options = _Options
0122     };
0123 
0124     typedef typename Base::IndexVector IndexVector;
0125     typedef typename Base::ScalarVector ScalarVector;
0126   protected:
0127     typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
0128 
0129     Index m_outerSize;
0130     Index m_innerSize;
0131     StorageIndex* m_outerIndex;
0132     StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
0133     Storage m_data;
0134 
0135   public:
0136     
0137     /** \returns the number of rows of the matrix */
0138     inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
0139     /** \returns the number of columns of the matrix */
0140     inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
0141 
0142     /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
0143     inline Index innerSize() const { return m_innerSize; }
0144     /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
0145     inline Index outerSize() const { return m_outerSize; }
0146     
0147     /** \returns a const pointer to the array of values.
0148       * This function is aimed at interoperability with other libraries.
0149       * \sa innerIndexPtr(), outerIndexPtr() */
0150     inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
0151     /** \returns a non-const pointer to the array of values.
0152       * This function is aimed at interoperability with other libraries.
0153       * \sa innerIndexPtr(), outerIndexPtr() */
0154     inline Scalar* valuePtr() { return m_data.valuePtr(); }
0155 
0156     /** \returns a const pointer to the array of inner indices.
0157       * This function is aimed at interoperability with other libraries.
0158       * \sa valuePtr(), outerIndexPtr() */
0159     inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
0160     /** \returns a non-const pointer to the array of inner indices.
0161       * This function is aimed at interoperability with other libraries.
0162       * \sa valuePtr(), outerIndexPtr() */
0163     inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
0164 
0165     /** \returns a const pointer to the array of the starting positions of the inner vectors.
0166       * This function is aimed at interoperability with other libraries.
0167       * \sa valuePtr(), innerIndexPtr() */
0168     inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
0169     /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
0170       * This function is aimed at interoperability with other libraries.
0171       * \sa valuePtr(), innerIndexPtr() */
0172     inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
0173 
0174     /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
0175       * This function is aimed at interoperability with other libraries.
0176       * \warning it returns the null pointer 0 in compressed mode */
0177     inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
0178     /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
0179       * This function is aimed at interoperability with other libraries.
0180       * \warning it returns the null pointer 0 in compressed mode */
0181     inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
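// A hedged sketch of the raw-pointer interoperability mentioned above, valid once the
// matrix is in compressed mode (column-major storage and the default int StorageIndex
// are assumed here):
//
//   mat.makeCompressed();
//   const double* vals = mat.valuePtr();       // nonZeros() values
//   const int*    inds = mat.innerIndexPtr();  // nonZeros() inner (row) indices
//   const int*    outs = mat.outerIndexPtr();  // outerSize()+1 column start positions
//   for (int j = 0; j < mat.outerSize(); ++j)
//     for (int p = outs[j]; p < outs[j+1]; ++p)
//       { /* value vals[p] sits at row inds[p], column j */ }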
0182 
0183     /** \internal */
0184     inline Storage& data() { return m_data; }
0185     /** \internal */
0186     inline const Storage& data() const { return m_data; }
0187 
0188     /** \returns the value of the matrix at position \a row, \a col.
0189       * This function returns Scalar(0) if the element is an explicit \em zero. */
0190     inline Scalar coeff(Index row, Index col) const
0191     {
0192       eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
0193       
0194       const Index outer = IsRowMajor ? row : col;
0195       const Index inner = IsRowMajor ? col : row;
0196       Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
0197       return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
0198     }
0199 
0200     /** \returns a non-const reference to the value of the matrix at position \a row, \a col
0201       *
0202       * If the element does not exist then it is inserted via the insert(Index,Index) function
0203       * which itself turns the matrix into a non-compressed form if that was not the case.
0204       *
0205       * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
0206       * function if the element does not already exist.
0207       */
0208     inline Scalar& coeffRef(Index row, Index col)
0209     {
0210       eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
0211       
0212       const Index outer = IsRowMajor ? row : col;
0213       const Index inner = IsRowMajor ? col : row;
0214 
0215       Index start = m_outerIndex[outer];
0216       Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
0217       eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
0218       if(end<=start)
0219         return insert(row,col);
0220       const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
0221       if((p<end) && (m_data.index(p)==inner))
0222         return m_data.value(p);
0223       else
0224         return insert(row,col);
0225     }
0226 
0227     /** \returns a reference to a new non-zero coefficient with coordinates \a row x \a col.
0228       * The non-zero coefficient must \b not already exist.
0229       *
0230       * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
0231       * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
0232       * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
0233       * inserted by increasing outer-indices.
0234       * 
0235       * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
0236       * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
0237       *
0238       * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
0239       * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
0240       *
0241       */
0242     Scalar& insert(Index row, Index col);
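// A sketch of the reserve-then-insert pattern recommended above; the estimate of about
// 6 non-zeros per column is an arbitrary assumption, and i, j, v_ij are placeholders:
//
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.reserve(Eigen::VectorXi::Constant(cols, 6)); // ~6 non-zeros expected per column
//   for (...)
//     A.insert(i, j) = v_ij;                       // each (i,j) inserted at most once
//   A.makeCompressed();                            // optional: pack the entries when done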
0243 
0244   public:
0245 
0246     /** Removes all non-zeros but keeps the allocated memory.
0247       *
0248       * This function does not free the currently allocated memory. To release as much memory as possible,
0249       * call \code mat.data().squeeze(); \endcode after resizing it.
0250       * 
0251       * \sa resize(Index,Index), data()
0252       */
0253     inline void setZero()
0254     {
0255       m_data.clear();
0256       memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
0257       if(m_innerNonZeros)
0258         memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
0259     }
0260 
0261     /** Preallocates \a reserveSize non zeros.
0262       *
0263       * Precondition: the matrix must be in compressed mode. */
0264     inline void reserve(Index reserveSize)
0265     {
0266       eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
0267       m_data.reserve(reserveSize);
0268     }
0269     
0270     #ifdef EIGEN_PARSED_BY_DOXYGEN
0271     /** Preallocates \a reserveSize[\c j] non-zeros for each column (resp. row) \c j.
0272       *
0273       * This function turns the matrix into non-compressed mode.
0274       * 
0275       * The type \c SizesType must expose the following interface:
0276         \code
0277         typedef value_type;
0278         const value_type& operator[](i) const;
0279         \endcode
0280       * for \c i in the [0,this->outerSize()[ range.
0281       * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
0282       */
0283     template<class SizesType>
0284     inline void reserve(const SizesType& reserveSizes);
0285     #else
0286     template<class SizesType>
0287     inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
0288     #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
0289         typename
0290     #endif
0291         SizesType::value_type())
0292     {
0293       EIGEN_UNUSED_VARIABLE(enableif);
0294       reserveInnerVectors(reserveSizes);
0295     }
0296     #endif // EIGEN_PARSED_BY_DOXYGEN
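// A small illustration of the SizesType interface described above; any type exposing
// operator[] over the [0, outerSize()) range works, and the sizes below are made up:
//
//   std::vector<int> sizes(mat.outerSize(), 4); // room for 4 non-zeros per inner vector...
//   sizes[0] = 10;                              // ...except the first one
//   mat.reserve(sizes);                         // turns mat into non-compressed mode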
0297   protected:
0298     template<class SizesType>
0299     inline void reserveInnerVectors(const SizesType& reserveSizes)
0300     {
0301       if(isCompressed())
0302       {
0303         Index totalReserveSize = 0;
0304         // turn the matrix into non-compressed mode
0305         m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
0306         if (!m_innerNonZeros) internal::throw_std_bad_alloc();
0307         
0308         // temporarily use m_innerNonZeros to hold the new starting points.
0309         StorageIndex* newOuterIndex = m_innerNonZeros;
0310         
0311         StorageIndex count = 0;
0312         for(Index j=0; j<m_outerSize; ++j)
0313         {
0314           newOuterIndex[j] = count;
0315           count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
0316           totalReserveSize += reserveSizes[j];
0317         }
0318         m_data.reserve(totalReserveSize);
0319         StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
0320         for(Index j=m_outerSize-1; j>=0; --j)
0321         {
0322           StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
0323           for(Index i=innerNNZ-1; i>=0; --i)
0324           {
0325             m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
0326             m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
0327           }
0328           previousOuterIndex = m_outerIndex[j];
0329           m_outerIndex[j] = newOuterIndex[j];
0330           m_innerNonZeros[j] = innerNNZ;
0331         }
0332         if(m_outerSize>0)
0333           m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
0334         
0335         m_data.resize(m_outerIndex[m_outerSize]);
0336       }
0337       else
0338       {
0339         StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
0340         if (!newOuterIndex) internal::throw_std_bad_alloc();
0341         
0342         StorageIndex count = 0;
0343         for(Index j=0; j<m_outerSize; ++j)
0344         {
0345           newOuterIndex[j] = count;
0346           StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
0347           StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
0348           count += toReserve + m_innerNonZeros[j];
0349         }
0350         newOuterIndex[m_outerSize] = count;
0351         
0352         m_data.resize(count);
0353         for(Index j=m_outerSize-1; j>=0; --j)
0354         {
0355           Index offset = newOuterIndex[j] - m_outerIndex[j];
0356           if(offset>0)
0357           {
0358             StorageIndex innerNNZ = m_innerNonZeros[j];
0359             for(Index i=innerNNZ-1; i>=0; --i)
0360             {
0361               m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
0362               m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
0363             }
0364           }
0365         }
0366         
0367         std::swap(m_outerIndex, newOuterIndex);
0368         std::free(newOuterIndex);
0369       }
0370       
0371     }
0372   public:
0373 
0374     //--- low level purely coherent filling ---
0375 
0376     /** \internal
0377       * \returns a reference to the non-zero coefficient at position \a row, \a col assuming that:
0378       * - the nonzero does not already exist
0379       * - the new coefficient is the last one according to the storage order
0380       *
0381       * Before filling a given inner vector you must call the startVec(Index) function.
0382       *
0383       * After an insertion session, you should call the finalize() function.
0384       *
0385       * \sa insert, insertBackByOuterInner, startVec */
0386     inline Scalar& insertBack(Index row, Index col)
0387     {
0388       return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
0389     }
0390 
0391     /** \internal
0392       * \sa insertBack, startVec */
0393     inline Scalar& insertBackByOuterInner(Index outer, Index inner)
0394     {
0395       eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
0396       eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
0397       Index p = m_outerIndex[outer+1];
0398       ++m_outerIndex[outer+1];
0399       m_data.append(Scalar(0), inner);
0400       return m_data.value(p);
0401     }
0402 
0403     /** \internal
0404       * \warning use it only if you know what you are doing */
0405     inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
0406     {
0407       Index p = m_outerIndex[outer+1];
0408       ++m_outerIndex[outer+1];
0409       m_data.append(Scalar(0), inner);
0410       return m_data.value(p);
0411     }
0412 
0413     /** \internal
0414       * \sa insertBack, insertBackByOuterInner */
0415     inline void startVec(Index outer)
0416     {
0417       eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
0418       eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
0419       m_outerIndex[outer+1] = m_outerIndex[outer];
0420     }
0421 
0422     /** \internal
0423       * Must be called after inserting a set of non-zero entries using the low-level compressed API.
0424       */
0425     inline void finalize()
0426     {
0427       if(isCompressed())
0428       {
0429         StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
0430         Index i = m_outerSize;
0431         // find the last filled column
0432         while (i>=0 && m_outerIndex[i]==0)
0433           --i;
0434         ++i;
0435         while (i<=m_outerSize)
0436         {
0437           m_outerIndex[i] = size;
0438           ++i;
0439         }
0440       }
0441     }
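// A sketch of the low-level sequential filling API above (startVec / insertBack /
// finalize) for a column-major matrix; sizes and values are placeholders, and inner
// indices must be strictly increasing within each inner vector:
//
//   mat.resize(rows, cols);
//   mat.reserve(nnzEstimate);
//   for (Index j = 0; j < mat.outerSize(); ++j) {
//     mat.startVec(j);              // once per inner vector, in order
//     // for each non-zero of column j, with increasing row index i:
//     mat.insertBack(i, j) = v_ij;
//   }
//   mat.finalize();                 // close the compressed storage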
0442 
0443     //---
0444 
0445     template<typename InputIterators>
0446     void setFromTriplets(const InputIterators& begin, const InputIterators& end);
0447 
0448     template<typename InputIterators,typename DupFunctor>
0449     void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
0450 
0451     void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
0452 
0453     template<typename DupFunctor>
0454     void collapseDuplicates(DupFunctor dup_func = DupFunctor());
0455 
0456     //---
0457     
0458     /** \internal
0459       * same as insert(Index,Index) except that the indices are given relative to the storage order */
0460     Scalar& insertByOuterInner(Index j, Index i)
0461     {
0462       return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
0463     }
0464 
0465     /** Turns the matrix into the \em compressed format.
0466       */
0467     void makeCompressed()
0468     {
0469       if(isCompressed())
0470         return;
0471       
0472       eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
0473       
0474       Index oldStart = m_outerIndex[1];
0475       m_outerIndex[1] = m_innerNonZeros[0];
0476       for(Index j=1; j<m_outerSize; ++j)
0477       {
0478         Index nextOldStart = m_outerIndex[j+1];
0479         Index offset = oldStart - m_outerIndex[j];
0480         if(offset>0)
0481         {
0482           for(Index k=0; k<m_innerNonZeros[j]; ++k)
0483           {
0484             m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
0485             m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
0486           }
0487         }
0488         m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
0489         oldStart = nextOldStart;
0490       }
0491       std::free(m_innerNonZeros);
0492       m_innerNonZeros = 0;
0493       m_data.resize(m_outerIndex[m_outerSize]);
0494       m_data.squeeze();
0495     }
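// For illustration: after a sequence of random insertions the matrix is typically in
// uncompressed mode; makeCompressed() packs it back so that valuePtr(), innerIndexPtr()
// and outerIndexPtr() describe a standard compressed column/row layout:
//
//   mat.coeffRef(i, j) += v; // may leave mat in uncompressed mode
//   mat.makeCompressed();    // pack the entries; innerNonZeroPtr() becomes null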
0496 
0497     /** Turns the matrix into uncompressed mode. */
0498     void uncompress()
0499     {
0500       if(m_innerNonZeros != 0)
0501         return; 
0502       m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
0503       for (Index i = 0; i < m_outerSize; i++)
0504       {
0505         m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; 
0506       }
0507     }
0508 
0509     /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
0510     void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
0511     {
0512       prune(default_prunning_func(reference,epsilon));
0513     }
0514     
0515     /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
0516       * The functor type \a KeepFunc must implement the following function:
0517       * \code
0518       * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
0519       * \endcode
0520       * \sa prune(Scalar,RealScalar)
0521       */
0522     template<typename KeepFunc>
0523     void prune(const KeepFunc& keep = KeepFunc())
0524     {
0525       // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
0526       makeCompressed();
0527 
0528       StorageIndex k = 0;
0529       for(Index j=0; j<m_outerSize; ++j)
0530       {
0531         Index previousStart = m_outerIndex[j];
0532         m_outerIndex[j] = k;
0533         Index end = m_outerIndex[j+1];
0534         for(Index i=previousStart; i<end; ++i)
0535         {
0536           if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
0537           {
0538             m_data.value(k) = m_data.value(i);
0539             m_data.index(k) = m_data.index(i);
0540             ++k;
0541           }
0542         }
0543       }
0544       m_outerIndex[m_outerSize] = k;
0545       m_data.resize(k,0);
0546     }
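// A hedged example of the predicate-based prune() above, assuming Scalar is double;
// the keep-criterion (upper triangular part) is an arbitrary illustration:
//
//   mat.prune([](const Eigen::Index& row, const Eigen::Index& col, const double&) {
//     return row <= col; // return true to keep the entry
//   });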
0547 
0548     /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
0549       *
0550       * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed mode
0551       * and the storage of the out-of-bounds coefficients is kept and reserved.
0552       * Call makeCompressed() to pack the entries and squeeze extra memory.
0553       *
0554       * \sa reserve(), setZero(), makeCompressed()
0555       */
0556     void conservativeResize(Index rows, Index cols) 
0557     {
0558       // No change
0559       if (this->rows() == rows && this->cols() == cols) return;
0560       
0561       // If one dimension is null, then there is nothing to be preserved
0562       if(rows==0 || cols==0) return resize(rows,cols);
0563 
0564       Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
0565       Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
0566       StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
0567 
0568       // Deals with inner non zeros
0569       if (m_innerNonZeros)
0570       {
0571         // Resize m_innerNonZeros
0572         StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
0573         if (!newInnerNonZeros) internal::throw_std_bad_alloc();
0574         m_innerNonZeros = newInnerNonZeros;
0575         
0576         for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)          
0577           m_innerNonZeros[i] = 0;
0578       } 
0579       else if (innerChange < 0) 
0580       {
0581         // Inner size decreased: allocate a new m_innerNonZeros
0582         m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
0583         if (!m_innerNonZeros) internal::throw_std_bad_alloc();
0584         for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
0585           m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
0586         for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
0587           m_innerNonZeros[i] = 0;
0588       }
0589       
0590       // Change the m_innerNonZeros in case of a decrease of inner size
0591       if (m_innerNonZeros && innerChange < 0)
0592       {
0593         for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
0594         {
0595           StorageIndex &n = m_innerNonZeros[i];
0596           StorageIndex start = m_outerIndex[i];
0597           while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; 
0598         }
0599       }
0600       
0601       m_innerSize = newInnerSize;
0602 
0603       // Re-allocate outer index structure if necessary
0604       if (outerChange == 0)
0605         return;
0606           
0607       StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
0608       if (!newOuterIndex) internal::throw_std_bad_alloc();
0609       m_outerIndex = newOuterIndex;
0610       if (outerChange > 0)
0611       {
0612         StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
0613         for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)          
0614           m_outerIndex[i] = lastIdx; 
0615       }
0616       m_outerSize += outerChange;
0617     }
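// A small sketch contrasting conservativeResize() with resize(); n and m are
// placeholder sizes:
//
//   mat.conservativeResize(mat.rows()+1, mat.cols()+1); // existing non-zeros are preserved
//   mat.resize(n, m);                                   // all non-zeros are discarded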
0618     
0619     /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
0620       * 
0621       * This function does not free the currently allocated memory. To release as much memory as possible,
0622       * call \code mat.data().squeeze(); \endcode after resizing it.
0623       * 
0624       * \sa reserve(), setZero()
0625       */
0626     void resize(Index rows, Index cols)
0627     {
0628       const Index outerSize = IsRowMajor ? rows : cols;
0629       m_innerSize = IsRowMajor ? cols : rows;
0630       m_data.clear();
0631       if (m_outerSize != outerSize || m_outerSize==0)
0632       {
0633         std::free(m_outerIndex);
0634         m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
0635         if (!m_outerIndex) internal::throw_std_bad_alloc();
0636         
0637         m_outerSize = outerSize;
0638       }
0639       if(m_innerNonZeros)
0640       {
0641         std::free(m_innerNonZeros);
0642         m_innerNonZeros = 0;
0643       }
0644       memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
0645     }
0646 
0647     /** \internal
0648       * Resize the nonzero vector to \a size */
0649     void resizeNonZeros(Index size)
0650     {
0651       m_data.resize(size);
0652     }
0653 
0654     /** \returns a const expression of the diagonal coefficients. */
0655     const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
0656     
0657     /** \returns a read-write expression of the diagonal coefficients.
0658       * \warning If the diagonal entries are written, then all diagonal
0659       * entries \b must already exist, otherwise an assertion will be raised.
0660       */
0661     DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
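// An illustration of diagonal() access (a sketch, assuming Scalar is double); per the
// warning above, writing requires the diagonal entries to already exist:
//
//   double trace = mat.diagonal().sum();                // read-only access always works
//   mat.diagonal() = Eigen::VectorXd::Ones(mat.rows()); // read-write access, see the warning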
0662 
0663     /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
0664     inline SparseMatrix()
0665       : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0666     {
0667       check_template_parameters();
0668       resize(0, 0);
0669     }
0670 
0671     /** Constructs a \a rows \c x \a cols empty matrix */
0672     inline SparseMatrix(Index rows, Index cols)
0673       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0674     {
0675       check_template_parameters();
0676       resize(rows, cols);
0677     }
0678 
0679     /** Constructs a sparse matrix from the sparse expression \a other */
0680     template<typename OtherDerived>
0681     inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
0682       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0683     {
0684       EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
0685         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
0686       check_template_parameters();
0687       const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
0688       if (needToTranspose)
0689         *this = other.derived();
0690       else
0691       {
0692         #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
0693           EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
0694         #endif
0695         internal::call_assignment_no_alias(*this, other.derived());
0696       }
0697     }
0698     
0699     /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
0700     template<typename OtherDerived, unsigned int UpLo>
0701     inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
0702       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0703     {
0704       check_template_parameters();
0705       Base::operator=(other);
0706     }
0707 
0708     /** Copy constructor (it performs a deep copy) */
0709     inline SparseMatrix(const SparseMatrix& other)
0710       : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0711     {
0712       check_template_parameters();
0713       *this = other.derived();
0714     }
0715 
0716     /** \brief Copy constructor with in-place evaluation */
0717     template<typename OtherDerived>
0718     SparseMatrix(const ReturnByValue<OtherDerived>& other)
0719       : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0720     {
0721       check_template_parameters();
0722       initAssignment(other);
0723       other.evalTo(*this);
0724     }
0725     
0726     /** \brief Copy constructor with in-place evaluation */
0727     template<typename OtherDerived>
0728     explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
0729       : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
0730     {
0731       check_template_parameters();
0732       *this = other.derived();
0733     }
0734 
0735     /** Swaps the content of two sparse matrices of the same type.
0736       * This is a fast operation that simply swaps the underlying pointers and parameters. */
0737     inline void swap(SparseMatrix& other)
0738     {
0739       //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
0740       std::swap(m_outerIndex, other.m_outerIndex);
0741       std::swap(m_innerSize, other.m_innerSize);
0742       std::swap(m_outerSize, other.m_outerSize);
0743       std::swap(m_innerNonZeros, other.m_innerNonZeros);
0744       m_data.swap(other.m_data);
0745     }
0746 
0747     /** Sets *this to the identity matrix.
0748       * This function also turns the matrix into compressed mode, and drops any reserved memory. */
0749     inline void setIdentity()
0750     {
0751       eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
0752       this->m_data.resize(rows());
0753       Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
0754       Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
0755       Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
0756       std::free(m_innerNonZeros);
0757       m_innerNonZeros = 0;
0758     }
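// A trivial usage sketch of setIdentity(), with n a placeholder size:
//
//   Eigen::SparseMatrix<double> I(n, n);
//   I.setIdentity(); // compressed identity with n stored ones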
0759     inline SparseMatrix& operator=(const SparseMatrix& other)
0760     {
0761       if (other.isRValue())
0762       {
0763         swap(other.const_cast_derived());
0764       }
0765       else if(this!=&other)
0766       {
0767         #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
0768           EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
0769         #endif
0770         initAssignment(other);
0771         if(other.isCompressed())
0772         {
0773           internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
0774           m_data = other.m_data;
0775         }
0776         else
0777         {
0778           Base::operator=(other);
0779         }
0780       }
0781       return *this;
0782     }
0783 
0784 #ifndef EIGEN_PARSED_BY_DOXYGEN
0785     template<typename OtherDerived>
0786     inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
0787     { return Base::operator=(other.derived()); }
0788 
0789     template<typename Lhs, typename Rhs>
0790     inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
0791 #endif // EIGEN_PARSED_BY_DOXYGEN
0792 
0793     template<typename OtherDerived>
0794     EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
0795 
0796     friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
0797     {
0798       EIGEN_DBG_SPARSE(
0799         s << "Nonzero entries:\n";
0800         if(m.isCompressed())
0801         {
0802           for (Index i=0; i<m.nonZeros(); ++i)
0803             s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
0804         }
0805         else
0806         {
0807           for (Index i=0; i<m.outerSize(); ++i)
0808           {
0809             Index p = m.m_outerIndex[i];
0810             Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
0811             Index k=p;
0812             for (; k<pe; ++k) {
0813               s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
0814             }
0815             for (; k<m.m_outerIndex[i+1]; ++k) {
0816               s << "(_,_) ";
0817             }
0818           }
0819         }
0820         s << std::endl;
0821         s << std::endl;
0822         s << "Outer pointers:\n";
0823         for (Index i=0; i<m.outerSize(); ++i) {
0824           s << m.m_outerIndex[i] << " ";
0825         }
0826         s << " $" << std::endl;
0827         if(!m.isCompressed())
0828         {
0829           s << "Inner non zeros:\n";
0830           for (Index i=0; i<m.outerSize(); ++i) {
0831             s << m.m_innerNonZeros[i] << " ";
0832           }
0833           s << " $" << std::endl;
0834         }
0835         s << std::endl;
0836       );
0837       s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
0838       return s;
0839     }
0840 
0841     /** Destructor */
0842     inline ~SparseMatrix()
0843     {
0844       std::free(m_outerIndex);
0845       std::free(m_innerNonZeros);
0846     }
0847 
0848     /** Overloaded for performance */
0849     Scalar sum() const;
0850     
0851 #   ifdef EIGEN_SPARSEMATRIX_PLUGIN
0852 #     include EIGEN_SPARSEMATRIX_PLUGIN
0853 #   endif
0854 
0855 protected:
0856 
0857     template<typename Other>
0858     void initAssignment(const Other& other)
0859     {
0860       resize(other.rows(), other.cols());
0861       if(m_innerNonZeros)
0862       {
0863         std::free(m_innerNonZeros);
0864         m_innerNonZeros = 0;
0865       }
0866     }
0867 
0868     /** \internal
0869       * \sa insert(Index,Index) */
0870     EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
0871 
0872     /** \internal
0873       * A vector object that is equal to 0 everywhere except for the value \a v at position \a i */
0874     class SingletonVector
0875     {
0876         StorageIndex m_index;
0877         StorageIndex m_value;
0878       public:
0879         typedef StorageIndex value_type;
0880         SingletonVector(Index i, Index v)
0881           : m_index(convert_index(i)), m_value(convert_index(v))
0882         {}
0883 
0884         StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
0885     };
0886 
0887     /** \internal
0888       * \sa insert(Index,Index) */
0889     EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
0890 
0891 public:
0892     /** \internal
0893       * \sa insert(Index,Index) */
0894     EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
0895     {
0896       const Index outer = IsRowMajor ? row : col;
0897       const Index inner = IsRowMajor ? col : row;
0898 
0899       eigen_assert(!isCompressed());
0900       eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
0901 
0902       Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
0903       m_data.index(p) = convert_index(inner);
0904       return (m_data.value(p) = Scalar(0));
0905     }
0906 protected:
0907     struct IndexPosPair {
0908       IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
0909       Index i;
0910       Index p;
0911     };
0912 
0913     /** \internal assign \a diagXpr to the diagonal of \c *this
0914       * There are different strategies:
0915       *   1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
0916       *   2 - otherwise, for each diagonal coeff,
0917       *     2.a - if it already exists, then we update it,
0918       *     2.b - otherwise, if *this is uncompressed and the current inner-vector has room for at least 1 element, then we perform an in-place insertion.
0919       *     2.c - otherwise, we will have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
0920       *   3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk to the right position, and insert the new elements.
0921       * 
0922       * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
0923       * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
0924       *       then it *might* be better to disable case 2.b since they will have to be copied anyway.
0925       */
0926     template<typename DiagXpr, typename Func>
0927     void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
0928     {
0929       Index n = diagXpr.size();
0930 
0931       const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
0932       if(overwrite)
0933       {
0934         if((this->rows()!=n) || (this->cols()!=n))
0935           this->resize(n, n);
0936       }
0937 
0938       if(m_data.size()==0 || overwrite)
0939       {
0940         typedef Array<StorageIndex,Dynamic,1> ArrayXI;  
0941         this->makeCompressed();
0942         this->resizeNonZeros(n);
0943         Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
0944         Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
0945         Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
0946         values.setZero();
0947         internal::call_assignment_no_alias(values, diagXpr, assignFunc);
0948       }
0949       else
0950       {
0951         bool isComp = isCompressed();
0952         internal::evaluator<DiagXpr> diaEval(diagXpr);
0953         std::vector<IndexPosPair> newEntries;
0954 
0955         // 1 - try in-place update and record insertion failures
0956         for(Index i = 0; i<n; ++i)
0957         {
0958           internal::LowerBoundIndex lb = this->lower_bound(i,i);
0959           Index p = lb.value;
0960           if(lb.found)
0961           {
0962             // the coeff already exists
0963             assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
0964           }
0965           else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
0966           {
0967             // non compressed mode with local room for inserting one element
0968             m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
0969             m_innerNonZeros[i]++;
0970             m_data.value(p) = Scalar(0);
0971             m_data.index(p) = StorageIndex(i);
0972             assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
0973           }
0974           else
0975           {
0976             // defer insertion
0977             newEntries.push_back(IndexPosPair(i,p));
0978           }
0979         }
0980         // 2 - insert deferred entries
0981         Index n_entries = Index(newEntries.size());
0982         if(n_entries>0)
0983         {
0984           Storage newData(m_data.size()+n_entries);
0985           Index prev_p = 0;
0986           Index prev_i = 0;
0987           for(Index k=0; k<n_entries;++k)
0988           {
0989             Index i = newEntries[k].i;
0990             Index p = newEntries[k].p;
0991             internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
0992             internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
0993             for(Index j=prev_i;j<i;++j)
0994               m_outerIndex[j+1] += k;
0995             if(!isComp)
0996               m_innerNonZeros[i]++;
0997             prev_p = p;
0998             prev_i = i;
0999             newData.value(p+k) = Scalar(0);
1000             newData.index(p+k) = StorageIndex(i);
1001             assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002           }
1003           {
1004             internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005             internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006             for(Index j=prev_i+1;j<=m_outerSize;++j)
1007               m_outerIndex[j] += n_entries;
1008           }
1009           m_data.swap(newData);
1010         }
1011       }
1012     }
1013 
1014 private:
1015   static void check_template_parameters()
1016   {
1017     EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1018     EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1019   }
1020 
1021   struct default_prunning_func {
1022     default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1023     inline bool operator() (const Index&, const Index&, const Scalar& value) const
1024     {
1025       return !internal::isMuchSmallerThan(value, reference, epsilon);
1026     }
1027     Scalar reference;
1028     RealScalar epsilon;
1029   };
1030 };
1031 
1032 namespace internal {
1033 
1034 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1035 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1036 {
1037   enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1038   typedef typename SparseMatrixType::Scalar Scalar;
1039   typedef typename SparseMatrixType::StorageIndex StorageIndex;
1040   SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
1041 
1042   if(begin!=end)
1043   {
1044     // pass 1: count the nnz per inner-vector
1045     typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1046     wi.setZero();
1047     for(InputIterator it(begin); it!=end; ++it)
1048     {
1049       eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1050       wi(IsRowMajor ? it->col() : it->row())++;
1051     }
1052 
1053     // pass 2: insert all the elements into trMat
1054     trMat.reserve(wi);
1055     for(InputIterator it(begin); it!=end; ++it)
1056       trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1057 
1058     // pass 3:
1059     trMat.collapseDuplicates(dup_func);
1060   }
1061 
1062   // pass 4: transposed copy -> implicit sorting
1063   mat = trMat;
1064 }
1065 
1066 }
1067 
1068 
1069 /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
1070   *
1071   * A \em triplet is a tuple (i,j,value) defining a non-zero element.
1072   * The input list of triplets does not have to be sorted, and can contain duplicated elements.
1073   * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
1074   * This is an \em O(n) operation, with \em n the number of triplet elements.
1075   * The initial contents of \c *this are destroyed.
1076   * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
1077   * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
1078   *
1079   * The \a InputIterators value_type must provide the following interface:
1080   * \code
1081   * Scalar value() const; // the value
1082   * Index row() const;   // the row index i
1083   * Index col() const;   // the column index j
1084   * \endcode
1085   * See for instance the Eigen::Triplet template class.
1086   *
1087   * Here is a typical usage example:
1088   * \code
1089     typedef Triplet<double> T;
1090     std::vector<T> tripletList;
1091     tripletList.reserve(estimation_of_entries);
1092     for(...)
1093     {
1094       // ...
1095       tripletList.push_back(T(i,j,v_ij));
1096     }
1097     SparseMatrixType m(rows,cols);
1098     m.setFromTriplets(tripletList.begin(), tripletList.end());
1099     // m is ready to go!
1100   * \endcode
1101   *
1102   * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
1103   * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
1104   * be explicitly stored into a std::vector for instance.
1105   */
1106 template<typename Scalar, int _Options, typename _StorageIndex>
1107 template<typename InputIterators>
1108 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1109 {
1110   internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1111 }
1112 
1113 /** The same as setFromTriplets() but when duplicates are encountered the functor \a dup_func is applied:
1114   * \code
1115   * value = dup_func(OldValue, NewValue)
1116   * \endcode 
1117   * Here is a C++11 example keeping the latest entry only:
1118   * \code
1119   * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
1120   * \endcode
1121   */
1122 template<typename Scalar, int _Options, typename _StorageIndex>
1123 template<typename InputIterators,typename DupFunctor>
1124 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1125 {
1126   internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1127 }
1128 
1129 /** \internal */
1130 template<typename Scalar, int _Options, typename _StorageIndex>
1131 template<typename DupFunctor>
1132 void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1133 {
1134   eigen_assert(!isCompressed());
1135   // TODO, in practice we should be able to use m_innerNonZeros for that task
1136   IndexVector wi(innerSize());
1137   wi.fill(-1);
1138   StorageIndex count = 0;
1139   // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
1140   for(Index j=0; j<outerSize(); ++j)
1141   {
1142     StorageIndex start   = count;
1143     Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
1144     for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1145     {
1146       Index i = m_data.index(k);
1147       if(wi(i)>=start)
1148       {
1149         // we have already met this entry => accumulate it
1150         m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1151       }
1152       else
1153       {
1154         m_data.value(count) = m_data.value(k);
1155         m_data.index(count) = m_data.index(k);
1156         wi(i) = count;
1157         ++count;
1158       }
1159     }
1160     m_outerIndex[j] = start;
1161   }
1162   m_outerIndex[m_outerSize] = count;
1163 
1164   // turn the matrix into compressed form
1165   std::free(m_innerNonZeros);
1166   m_innerNonZeros = 0;
1167   m_data.resize(m_outerIndex[m_outerSize]);
1168 }
1169 
1170 template<typename Scalar, int _Options, typename _StorageIndex>
1171 template<typename OtherDerived>
1172 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1173 {
1174   EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1175         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1176 
1177   #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1178     EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1179   #endif
1180       
1181   const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1182   if (needToTranspose)
1183   {
1184     #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1185       EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1186     #endif
1187     // two-pass algorithm:
1188     //  1 - compute the number of coeffs per dest inner vector
1189     //  2 - do the actual copy/eval
1190     // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1191     typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1192     typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1193     typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1194     OtherCopy otherCopy(other.derived());
1195     OtherCopyEval otherCopyEval(otherCopy);
1196 
1197     SparseMatrix dest(other.rows(),other.cols());
1198     Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1199 
1200     // pass 1
1201     // FIXME the above copy could be merged with that pass
1202     for (Index j=0; j<otherCopy.outerSize(); ++j)
1203       for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1204         ++dest.m_outerIndex[it.index()];
1205 
1206     // prefix sum
1207     StorageIndex count = 0;
1208     IndexVector positions(dest.outerSize());
1209     for (Index j=0; j<dest.outerSize(); ++j)
1210     {
1211       StorageIndex tmp = dest.m_outerIndex[j];
1212       dest.m_outerIndex[j] = count;
1213       positions[j] = count;
1214       count += tmp;
1215     }
1216     dest.m_outerIndex[dest.outerSize()] = count;
1217     // alloc
1218     dest.m_data.resize(count);
1219     // pass 2
1220     for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1221     {
1222       for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1223       {
1224         Index pos = positions[it.index()]++;
1225         dest.m_data.index(pos) = j;
1226         dest.m_data.value(pos) = it.value();
1227       }
1228     }
1229     this->swap(dest);
1230     return *this;
1231   }
1232   else
1233   {
1234     if(other.isRValue())
1235     {
1236       initAssignment(other.derived());
1237     }
1238     // there is no special optimization
1239     return Base::operator=(other.derived());
1240   }
1241 }
1242 
1243 template<typename _Scalar, int _Options, typename _StorageIndex>
1244 typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1245 {
1246   eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1247   
1248   const Index outer = IsRowMajor ? row : col;
1249   const Index inner = IsRowMajor ? col : row;
1250   
1251   if(isCompressed())
1252   {
1253     if(nonZeros()==0)
1254     {
1255       // reserve space if not already done
1256       if(m_data.allocatedSize()==0)
1257         m_data.reserve(2*m_innerSize);
1258       
1259       // turn the matrix into non-compressed mode
1260       m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1261       if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1262       
1263       memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1264       
1265       // pack all inner-vectors to the end of the pre-allocated space
1266       // and allocate the entire free-space to the first inner-vector
1267       StorageIndex end = convert_index(m_data.allocatedSize());
1268       for(Index j=1; j<=m_outerSize; ++j)
1269         m_outerIndex[j] = end;
1270     }
1271     else
1272     {
1273       // turn the matrix into non-compressed mode
1274       m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1275       if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1276       for(Index j=0; j<m_outerSize; ++j)
1277         m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1278     }
1279   }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to the end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector's end matches the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible with random uncompressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

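  // Neither of the two fast "push back" paths applied: fall back to the generic
  // sorted insertion into uncompressed storage implemented below.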
  return insertUncompressed(row,col);
}
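
// A minimal usage sketch of the random-insertion API implemented above (identifiers
// such as rows, cols, i, j, v are illustrative, not part of Eigen). Reserving an
// estimate of the non-zeros per inner vector keeps the fast paths of insert()
// active and avoids most reallocations:
//
//   Eigen::SparseMatrix<double> A(rows, cols);      // column-major by default
//   A.reserve(Eigen::VectorXi::Constant(cols, 6));  // expect ~6 non-zeros per column
//   A.insert(i, j) = v;                             // (i,j) must not be stored yet
//   A.makeCompressed();                             // squeeze back to compressed format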

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
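
// Note: as the assertion above enforces, insert()/insertUncompressed() must not be
// called for a coefficient that is already stored. To read-write an existing (or
// possibly missing) entry, coeffRef() is the appropriate call, e.g. (sketch):
//
//   A.coeffRef(i, j) += v;  // searches for (i,j) and inserts a zero first if absent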

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data; to reduce repeated reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integer overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce repeated minor reallocs when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
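
  // Worked example of the growth heuristic above (numbers purely illustrative):
  // when inserting into outer vector 99 of 400 while the preceding vectors already
  // hold 1000 non-zeros (so m_data.size() is about 1000), we get
  //   nnzEstimate  = 1000 * 400 / (99+1) = 4000
  //   reallocRatio = (4000 - 1000) / 1000 = 3
  // which lies inside the [1.5, 8] clamp, so the buffer is grown with a ratio of ~3.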
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops, wrong guess: let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vector:
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift the data of the trailing inner vectors:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

namespace internal {

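// Evaluator for a plain SparseMatrix: it simply forwards to the evaluator of
// SparseCompressedBase, since the matrix already exposes its storage directly.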
template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H