0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 //
0004 // Copyright (C) 2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
0005 // Copyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr>
0006 //
0007 // This Source Code Form is subject to the terms of the Mozilla
0008 // Public License v. 2.0. If a copy of the MPL was not distributed
0009 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0010 
0011 #ifndef EIGEN_SPARSEBLOCKMATRIX_H
0012 #define EIGEN_SPARSEBLOCKMATRIX_H
0013 
0014 namespace Eigen { 
0015 /** \ingroup SparseCore_Module
0016   *
0017   * \class BlockSparseMatrix
0018   *
0019   * \brief A versatile sparse matrix representation where each element is a block
0020   *
0021   * This class provides routines to manipulate block sparse matrices stored in a
0022   * BSR-like representation. There are two main types:
0023   *
0024   * 1. All blocks have the same number of rows and columns, called block size
0025   * in the following. In this case, if this block size is known at compile time,
0026   * it can be given as a template parameter like
0027   * \code
0028   * BlockSparseMatrix<Scalar, 3, ColMajor> bmat(b_rows, b_cols);
0029   * \endcode
0030   * Here, bmat is a b_rows x b_cols block sparse matrix
0031   * where each coefficient is a 3x3 dense matrix.
0032   * If the block size is fixed but only known at runtime,
0033   * \code
0034   * BlockSparseMatrix<Scalar, Dynamic, ColMajor> bmat(b_rows, b_cols);
0035   * bmat.setBlockSize(block_size);
0036   * \endcode
0037   *
0038   * 2. The second case is for variable-block sparse matrices.
0039   * Here each block has its own dimensions. The only restriction is that all the blocks
0040   * in a row (resp. a column) should have the same number of rows (resp. of columns).
0041   * It is thus required in this case to describe the layout of the matrix by calling
0042   * setBlockLayout(rowBlocks, colBlocks).
0043   *
0044   * In either case, the matrix can be filled by calling setFromTriplets().
0045   * A regular sparse matrix can be converted to a block sparse matrix and vice versa.
0046   * The block layout must be described beforehand by calling either
0047   * setBlockSize() for fixed-size blocks or setBlockLayout() for variable-size blocks.
0048   *
0049   * \tparam _Scalar The Scalar type
0050   * \tparam _BlockAtCompileTime The block layout option. It takes the following values:
0051   * Dynamic : the block size is only known at runtime
0052   * a positive integer : fixed-size blocks known at compile time
0053   */
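     /* A minimal end-to-end usage sketch for fixed-size blocks, assuming triplets of
      * type Eigen::Triplet<Eigen::Matrix3d> so that row(), col() and value() are
      * available as required by setFromTriplets():
      * \code
      * typedef Eigen::Triplet<Eigen::Matrix3d> BlockTriplet;
      * std::vector<BlockTriplet> triplets;
      * triplets.push_back(BlockTriplet(0, 0, Eigen::Matrix3d::Identity()));
      * triplets.push_back(BlockTriplet(1, 1, Eigen::Matrix3d::Constant(2.0)));
      * BlockSparseMatrix<double, 3> bmat(2, 2);   // 2x2 block structure, 3x3 blocks
      * bmat.setFromTriplets(triplets.begin(), triplets.end());
      * Eigen::VectorXd x = Eigen::VectorXd::Ones(bmat.cols());
      * Eigen::VectorXd y = bmat * x;              // block sparse times dense vector
      * \endcode
      */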
0054 template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;
0055 
0056 template<typename BlockSparseMatrixT> class BlockSparseMatrixView;
0057 
0058 namespace internal {
0059 template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
0060 struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
0061 {
0062   typedef _Scalar Scalar;
0063   typedef _Index Index;
0064   typedef Sparse StorageKind; // FIXME Where is it used ??
0065   typedef MatrixXpr XprKind;
0066   enum {
0067     RowsAtCompileTime = Dynamic,
0068     ColsAtCompileTime = Dynamic,
0069     MaxRowsAtCompileTime = Dynamic,
0070     MaxColsAtCompileTime = Dynamic,
0071     BlockSize = _BlockAtCompileTime,
0072     Flags = _Options | NestByRefBit | LvalueBit,
0073     CoeffReadCost = NumTraits<Scalar>::ReadCost,
0074     SupportedAccessPatterns = InnerRandomAccessPattern
0075   };
0076 };
0077 template<typename BlockSparseMatrixT>
0078 struct traits<BlockSparseMatrixView<BlockSparseMatrixT> >
0079 {
0080   typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
0081   typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;
0082 
0083 };
0084 
0085 // Function object to sort a triplet list
0086 template<typename Iterator, bool IsColMajor>
0087 struct TripletComp
0088 {
0089   typedef typename Iterator::value_type Triplet;
0090   bool operator()(const Triplet& a, const Triplet& b)
0091   { if(IsColMajor)
0092       return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
0093     else
0094       return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
0095   }
0096 };
0097 } // end namespace internal
0098 
0099 
0100 /* Proxy to view the block sparse matrix as a regular sparse matrix */
0101 template<typename BlockSparseMatrixT>
0102 class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT>
0103 {
0104   public:
0105     typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
0106     typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
0107     typedef typename BlockSparseMatrixT::Index Index;
0108     typedef  BlockSparseMatrixT Nested;
0109     enum {
0110       Flags = BlockSparseMatrixT::Options,
0111       Options = BlockSparseMatrixT::Options,
0112       RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
0113       ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
0114       MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
0115       MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
0116     };
0117   public:
0118     BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat)
0119      : m_spblockmat(spblockmat)
0120     {}
0121 
0122     Index outerSize() const
0123     {
0124       return (Flags&RowMajorBit) == 1 ? this->rows() : this->cols();
0125     }
0126     Index cols() const
0127     {
0128       return m_spblockmat.blockCols();
0129     }
0130     Index rows() const
0131     {
0132       return m_spblockmat.blockRows();
0133     }
0134     Scalar coeff(Index row, Index col)
0135     {
0136       return m_spblockmat.coeff(row, col);
0137     }
0138     Scalar coeffRef(Index row, Index col)
0139     {
0140       return m_spblockmat.coeffRef(row, col);
0141     }
0142     // Wrapper to iterate over all blocks
0143     class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator
0144     {
0145       public:
0146       InnerIterator(const BlockSparseMatrixView& mat, Index outer)
0147           : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer)
0148       {}
0149 
0150     };
0151 
0152   protected:
0153     const BlockSparseMatrixT& m_spblockmat;
0154 };
0155 
0156 // Proxy to view a regular vector as a block vector
0157 template<typename BlockSparseMatrixT, typename VectorType>
0158 class BlockVectorView
0159 {
0160   public:
0161     enum {
0162       BlockSize = BlockSparseMatrixT::BlockSize,
0163       ColsAtCompileTime = VectorType::ColsAtCompileTime,
0164       RowsAtCompileTime = VectorType::RowsAtCompileTime,
0165       Flags = VectorType::Flags
0166     };
0167     typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime==1)? 1 : BlockSize, (ColsAtCompileTime==1)? 1 : BlockSize> >Scalar;
0168     typedef typename BlockSparseMatrixT::Index Index;
0169   public:
0170     BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
0171     : m_spblockmat(spblockmat),m_vec(vec)
0172     { }
0173     inline Index cols() const
0174     {
0175       return m_vec.cols();
0176     }
0177     inline Index size() const
0178     {
0179       return m_spblockmat.blockRows();
0180     }
0181     inline Scalar coeff(Index bi) const
0182     {
0183       Index startRow = m_spblockmat.blockRowsIndex(bi);
0184       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
0185       return m_vec.middleRows(startRow, rowSize);
0186     }
0187     inline Scalar coeff(Index bi, Index j) const
0188     {
0189       Index startRow = m_spblockmat.blockRowsIndex(bi);
0190       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
0191       return m_vec.block(startRow, j, rowSize, 1);
0192     }
0193   protected:
0194     const BlockSparseMatrixT& m_spblockmat;
0195     const VectorType& m_vec;
0196 };
0197 
0198 template<typename VectorType, typename Index> class BlockVectorReturn;
0199 
0200 
0201 // Proxy to view a regular vector as a block vector
0202 template<typename BlockSparseMatrixT, typename VectorType>
0203 class BlockVectorReturn
0204 {
0205   public:
0206     enum {
0207       ColsAtCompileTime = VectorType::ColsAtCompileTime,
0208       RowsAtCompileTime = VectorType::RowsAtCompileTime,
0209       Flags = VectorType::Flags
0210     };
0211     typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
0212     typedef typename BlockSparseMatrixT::Index Index;
0213   public:
0214     BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec)
0215     : m_spblockmat(spblockmat),m_vec(vec)
0216     { }
0217     inline Index size() const
0218     {
0219       return m_spblockmat.blockRows();
0220     }
0221     inline Scalar coeffRef(Index bi)
0222     {
0223       Index startRow = m_spblockmat.blockRowsIndex(bi);
0224       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
0225       return m_vec.middleRows(startRow, rowSize);
0226     }
0227     inline Scalar coeffRef(Index bi, Index j)
0228     {
0229       Index startRow = m_spblockmat.blockRowsIndex(bi);
0230       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
0231       return m_vec.block(startRow, j, rowSize, 1);
0232     }
0233 
0234   protected:
0235     const BlockSparseMatrixT& m_spblockmat;
0236     VectorType& m_vec;
0237 };
0238 
0239 // Block version of the sparse dense product
0240 template<typename Lhs, typename Rhs>
0241 class BlockSparseTimeDenseProduct;
0242 
0243 namespace internal {
0244 
0245 template<typename BlockSparseMatrixT, typename VecType>
0246 struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> >
0247 {
0248   typedef Dense StorageKind;
0249   typedef MatrixXpr XprKind;
0250   typedef typename BlockSparseMatrixT::Scalar Scalar;
0251   typedef typename BlockSparseMatrixT::Index Index;
0252   enum {
0253     RowsAtCompileTime = Dynamic,
0254     ColsAtCompileTime = Dynamic,
0255     MaxRowsAtCompileTime = Dynamic,
0256     MaxColsAtCompileTime = Dynamic,
0257     Flags = 0,
0258     CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
0259   };
0260 };
0261 } // end namespace internal
0262 
0263 template<typename Lhs, typename Rhs>
0264 class BlockSparseTimeDenseProduct
0265   : public ProductBase<BlockSparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
0266 {
0267   public:
0268     EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)
0269 
0270     BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
0271     {}
0272 
0273     template<typename Dest> void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const
0274     {
0275       BlockVectorReturn<Lhs,Dest> tmpDest(m_lhs, dest);
0276       internal::sparse_time_dense_product( BlockSparseMatrixView<Lhs>(m_lhs),  BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs), tmpDest, alpha);
0277     }
0278 
0279   private:
0280     BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
0281 };
0282 
0283 template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
0284 class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
0285 {
0286   public:
0287     typedef _Scalar Scalar;
0288     typedef typename NumTraits<Scalar>::Real RealScalar;
0289     typedef _StorageIndex StorageIndex;
0290     typedef typename internal::ref_selector<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;
0291 
0292     enum {
0293       Options = _Options,
0294       Flags = Options,
0295       BlockSize=_BlockAtCompileTime,
0296       RowsAtCompileTime = Dynamic,
0297       ColsAtCompileTime = Dynamic,
0298       MaxRowsAtCompileTime = Dynamic,
0299       MaxColsAtCompileTime = Dynamic,
0300       IsVectorAtCompileTime = 0,
0301       IsColMajor = Flags&RowMajorBit ? 0 : 1
0302     };
0303     typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockScalar;
0304     typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
0305     typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
0306     typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
0307   public:
0308     // Default constructor
0309     BlockSparseMatrix()
0310     : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0),
0311       m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0),
0312       m_outerIndex(0),m_blockSize(BlockSize)
0313     { }
0314 
0315 
0316     /**
0317      * \brief Construct and resize
0318      *
0319      */
0320     BlockSparseMatrix(Index brow, Index bcol)
0321       : m_innerBSize(IsColMajor ? brow : bcol),
0322         m_outerBSize(IsColMajor ? bcol : brow),
0323         m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
0324         m_values(0),m_blockPtr(0),m_indices(0),
0325         m_outerIndex(0),m_blockSize(BlockSize)
0326     { }
0327 
0328     /**
0329      * \brief Copy-constructor
0330      */
0331     BlockSparseMatrix(const BlockSparseMatrix& other)
0332       : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize),
0333         m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros),
0334         m_blockPtr(0),m_blockSize(other.m_blockSize)
0335     {
0336       // should we allow copying between variable-size blocks and fixed-size blocks ??
0337       eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");
0338 
0339       std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset);
0340       std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset);
0341       std::copy(other.m_values, other.m_values+m_nonzeros, m_values);
0342 
0343       if(m_blockSize == Dynamic)
0344         std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks+1, m_blockPtr);
0345 
0346       std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices);
0347       std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize+1, m_outerIndex);
0348     }
0349 
0350     friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second)
0351     {
0352       std::swap(first.m_innerBSize, second.m_innerBSize);
0353       std::swap(first.m_outerBSize, second.m_outerBSize);
0354       std::swap(first.m_innerOffset, second.m_innerOffset);
0355       std::swap(first.m_outerOffset, second.m_outerOffset);
0356       std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
0357       std::swap(first.m_nonzeros, second.m_nonzeros);
0358       std::swap(first.m_values, second.m_values);
0359       std::swap(first.m_blockPtr, second.m_blockPtr);
0360       std::swap(first.m_indices, second.m_indices);
0361       std::swap(first.m_outerIndex, second.m_outerIndex);
0362       std::swap(first.m_blockSize, second.m_blockSize);
0363     }
0364 
0365     BlockSparseMatrix& operator=(BlockSparseMatrix other)
0366     {
0367       // Copy-and-swap idiom: avoids leaking data if an exception is thrown
0368       swap(*this, other);
0369       return *this;
0370     }
0371 
0372     // Destructor
0373     ~BlockSparseMatrix()
0374     {
0375       delete[] m_outerIndex;
0376       delete[] m_innerOffset;
0377       delete[] m_outerOffset;
0378       delete[] m_indices;
0379       delete[] m_blockPtr;
0380       delete[] m_values;
0381     }
0382 
0383 
0384     /**
0385       * \brief Constructor from a sparse matrix
0386       *
0387       */
0388     template<typename MatrixType>
0389     inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize)
0390     {
0391       EIGEN_STATIC_ASSERT((BlockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);
0392 
0393       *this = spmat;
0394     }
0395 
0396     /**
0397       * \brief Assignment from a sparse matrix with the same storage order
0398       *
0399       * Convert from a sparse matrix to block sparse matrix.
0400       * \warning Before calling this function, it is necessary to call
0401       * either setBlockLayout() (matrices with variable-size blocks)
0402       * or setBlockSize() (for fixed-size blocks).
0403       */
0404     template<typename MatrixType>
0405     inline BlockSparseMatrix& operator=(const MatrixType& spmat)
0406     {
0407       eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
0408                    && "Trying to assign to a zero-size matrix, call resize() first");
0409       eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
0410       typedef SparseMatrix<bool,MatrixType::Options,typename MatrixType::Index> MatrixPatternType;
0411       MatrixPatternType  blockPattern(blockRows(), blockCols());
0412       m_nonzeros = 0;
0413 
0414       // First, compute the number of nonzero blocks and their locations
0415       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
0416       {
0417         // Browse each outer block and compute the structure
0418         std::vector<bool> nzblocksFlag(m_innerBSize,false);  // Record the existing blocks
0419         blockPattern.startVec(bj);
0420         for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
0421         {
0422           typename MatrixType::InnerIterator it_spmat(spmat, j);
0423           for(; it_spmat; ++it_spmat)
0424           {
0425             StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
0426             if(!nzblocksFlag[bi])
0427             {
0428               // Save the index of this nonzero block
0429               nzblocksFlag[bi] = true;
0430               blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
0431               // Compute the total number of nonzeros (including explicit zeros in blocks)
0432               m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
0433             }
0434           }
0435         } // end current outer block
0436       }
0437       blockPattern.finalize();
0438 
0439       // Allocate the internal arrays
0440       setBlockStructure(blockPattern);
0441 
0442       for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
0443       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
0444       {
0445         // Now copy the values
0446         for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
0447         {
0448           // Browse the outer block column by column (for column-major matrices)
0449           typename MatrixType::InnerIterator it_spmat(spmat, j);
0450           for(; it_spmat; ++it_spmat)
0451           {
0452             StorageIndex idx = 0; // Position of this block in the column block
0453             StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
0454             // Go to the inner block where this element belongs to
0455             while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks
0456             StorageIndex idxVal;// Get the right position in the array of values for this element
0457             if(m_blockSize == Dynamic)
0458             {
0459               // Offset from all blocks before ...
0460               idxVal =  m_blockPtr[m_outerIndex[bj]+idx];
0461               // ... and offset inside the block
0462               idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
0463             }
0464             else
0465             {
0466               // All blocks before
0467               idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
0468               // inside the block
0469               idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
0470             }
0471             // Insert the value
0472             m_values[idxVal] = it_spmat.value();
0473           } // end of this column
0474         } // end of this block
0475       } // end of this outer block
0476 
0477       return *this;
0478     }
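     /* A conversion sketch under the assumption of a column-major SparseMatrix whose
      * nonzeros align with a fixed 2x2 block structure (the dimensions are illustrative):
      * \code
      * Eigen::SparseMatrix<double> spmat(6, 6);
      * // ... fill spmat ...
      * BlockSparseMatrix<double, 2> bmat(3, 3);   // 3x3 block structure, 2x2 blocks
      * bmat = spmat;                              // the assignment performs the conversion
      * \endcode
      */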
0479 
0480     /**
0481       * \brief Set the nonzero block pattern of the matrix
0482       *
0483       * Given a sparse matrix describing the nonzero block pattern,
0484       * this function prepares the internal pointers for values.
0485       * After calling this function, any *nonzero* block (bi, bj) can be set
0486       * with a simple call to coeffRef(bi,bj).
0487       *
0488       *
0489       * \warning Before calling this function, it is necessary to call
0490       * either setBlockLayout() (matrices with variable-size blocks)
0491       * or setBlockSize() (for fixed-size blocks).
0492       *
0493       * \param blockPattern Sparse matrix of boolean elements describing the block structure
0494       *
0495       * \sa setBlockLayout() \sa setBlockSize()
0496       */
0497     template<typename MatrixType>
0498     void setBlockStructure(const MatrixType& blockPattern)
0499     {
0500       resize(blockPattern.rows(), blockPattern.cols());
0501       reserve(blockPattern.nonZeros());
0502 
0503       // Browse the block pattern and set up the various pointers
0504       m_outerIndex[0] = 0;
0505       if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
0506       for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
0507       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
0508       {
0509         //Browse each outer block
0510 
0511         //First, copy and save the indices of nonzero blocks
0512         //FIXME : find a way to avoid this ...
0513         std::vector<int> nzBlockIdx;
0514         typename MatrixType::InnerIterator it(blockPattern, bj);
0515         for(; it; ++it)
0516         {
0517           nzBlockIdx.push_back(it.index());
0518         }
0519         std::sort(nzBlockIdx.begin(), nzBlockIdx.end());
0520 
0521         // Now, fill block indices and (eventually) pointers to blocks
0522         for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx)
0523         {
0524           StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices
0525           m_indices[offset] = nzBlockIdx[idx];
0526           if(m_blockSize == Dynamic)
0527             m_blockPtr[offset+1] = m_blockPtr[offset] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
0528           // There is no blockPtr for fixed-size blocks... not needed !???
0529         }
0530         // Save the pointer to the next outer block
0531         m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
0532       }
0533     }
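     /* A sketch of the variable-size workflow described above; the 2x2 pattern and the
      * block sizes (2 and 3) are illustrative assumptions:
      * \code
      * BlockSparseMatrix<double> bmat(2, 2);          // Dynamic block size
      * Eigen::VectorXi rowBlocks(2), colBlocks(2);
      * rowBlocks << 2, 3;  colBlocks << 2, 3;
      * bmat.setBlockLayout(rowBlocks, colBlocks);     // describe the layout first
      * Eigen::SparseMatrix<bool> pattern(2, 2);       // nonzero-block pattern
      * pattern.insert(0, 0) = true;
      * pattern.insert(1, 1) = true;
      * bmat.setBlockStructure(pattern);
      * bmat.coeffRef(0, 0) = Eigen::MatrixXd::Identity(2, 2);
      * bmat.coeffRef(1, 1) = Eigen::MatrixXd::Constant(3, 3, 1.0);
      * \endcode
      */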
0534 
0535     /**
0536       * \brief Set the number of row and column blocks
0537       */
0538     inline void resize(Index brow, Index bcol)
0539     {
0540       m_innerBSize = IsColMajor ? brow : bcol;
0541       m_outerBSize = IsColMajor ? bcol : brow;
0542     }
0543 
0544     /**
0545       * \brief Set the block size at runtime for a fixed-size block layout
0546       *
0547       * Call this only for fixed-size blocks
0548       */
0549     inline void setBlockSize(Index blockSize)
0550     {
0551       m_blockSize = blockSize;
0552     }
0553 
0554     /**
0555       * \brief Set the row and column block layouts
0556       *
0557       * This function sets the size of each row and column block.
0558       * It should be used only for blocks with variable size.
0559       * \param rowBlocks : Number of rows per row block
0560       * \param colBlocks : Number of columns per column block
0561       * \sa resize(), setBlockSize()
0562       */
0563     inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
0564     {
0565       const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
0566       const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
0567       eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
0568       eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
0569       m_outerBSize = outerBlocks.size();
0570       //  starting index of blocks... cumulative sums
0571       m_innerOffset = new StorageIndex[m_innerBSize+1];
0572       m_outerOffset = new StorageIndex[m_outerBSize+1];
0573       m_innerOffset[0] = 0;
0574       m_outerOffset[0] = 0;
0575       std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
0576       std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);
0577 
0578       // Compute the total number of nonzeros
0579       m_nonzeros = 0;
0580       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
0581         for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
0582           m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
0583 
0584     }
0585 
0586     /**
0587       * \brief Allocate the internal array of pointers to blocks and their inner indices
0588       *
0589       * \note For fixed-size blocks, call setBlockSize() to set the block size;
0590       * for variable-size blocks, call setBlockLayout() before using this function.
0591       *
0592       * \param nonzerosblocks Number of nonzero blocks. The total number of nonzeros
0593       * is computed in setBlockLayout() for variable-size blocks.
0594       * \sa setBlockSize()
0595       */
0596     inline void reserve(const Index nonzerosblocks)
0597     {
0598       eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
0599           "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");
0600 
0601       //FIXME Should free if already allocated
0602       m_outerIndex = new StorageIndex[m_outerBSize+1];
0603 
0604       m_nonzerosblocks = nonzerosblocks;
0605       if(m_blockSize != Dynamic)
0606       {
0607         m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
0608         m_blockPtr = 0;
0609       }
0610       else
0611       {
0612         // m_nonzeros  is already computed in setBlockLayout()
0613         m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
0614       }
0615       m_indices = new StorageIndex[m_nonzerosblocks+1];
0616       m_values = new Scalar[m_nonzeros];
0617     }
0618 
0619 
0620     /**
0621       * \brief Fill the matrix values from a triplet list.
0622       *
0623       * Each triplet item holds a block stored as an Eigen dense matrix.
0624       * The InputIterator class should provide the functions row(), col() and value().
0625       *
0626       * \note For fixed-size blocks, call setBlockSize() before this function.
0627       *
0628       * FIXME Do not accept duplicates
0629       */
0630     template<typename InputIterator>
0631     void setFromTriplets(const InputIterator& begin, const InputIterator& end)
0632     {
0633       eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before");
0634 
0635       /* First, sort the triplet list
0636         * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted
0637         * The best approach is like in SparseMatrix::setFromTriplets()
0638         */
0639       internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
0640       std::sort(begin, end, tripletcomp);
0641 
0642       /* Count the number of rows and column blocks,
0643        * and the number of nonzero blocks per outer dimension
0644        */
0645       VectorXi rowBlocks(m_innerBSize); // Size of each block row
0646       VectorXi colBlocks(m_outerBSize); // Size of each block column
0647       rowBlocks.setZero(); colBlocks.setZero();
0648       VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector
0649       VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks
0650       nzblock_outer.setZero();
0651       nz_outer.setZero();
0652       for(InputIterator it(begin); it !=end; ++it)
0653       {
0654         eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
0655         eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
0656                      || (m_blockSize == Dynamic));
0657 
0658         if(m_blockSize == Dynamic)
0659         {
0660           eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
0661               "NON CORRESPONDING SIZES FOR ROW BLOCKS");
0662           eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
0663               "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
0664           rowBlocks[it->row()] =it->value().rows();
0665           colBlocks[it->col()] = it->value().cols();
0666         }
0667         nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
0668         nzblock_outer(IsColMajor ? it->col() : it->row())++;
0669       }
0670       // Allocate member arrays
0671       if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
0672       StorageIndex nzblocks = nzblock_outer.sum();
0673       reserve(nzblocks);
0674 
0675        // Temporary markers
0676       VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion
0677 
0678       // Setup outer index pointers and markers
0679       m_outerIndex[0] = 0;
0680       if (m_blockSize == Dynamic)  m_blockPtr[0] =  0;
0681       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
0682       {
0683         m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
0684         block_id(bj) = m_outerIndex[bj];
0685         if(m_blockSize==Dynamic)
0686         {
0687           m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
0688         }
0689       }
0690 
0691       // Fill the matrix
0692       for(InputIterator it(begin); it!=end; ++it)
0693       {
0694         StorageIndex outer = IsColMajor ? it->col() : it->row();
0695         StorageIndex inner = IsColMajor ? it->row() : it->col();
0696         m_indices[block_id(outer)] = inner;
0697         StorageIndex block_size = it->value().rows()*it->value().cols();
0698         StorageIndex nz_marker = blockPtr(block_id[outer]);
0699         memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
0700         if(m_blockSize == Dynamic)
0701         {
0702           m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size;
0703         }
0704         block_id(outer)++;
0705       }
0706 
0707       // An alternative when the outer indices are sorted...no need to use an array of markers
0708 //      for(Index bcol = 0; bcol < m_outerBSize; ++bcol)
0709 //      {
0710 //      Index id = 0, id_nz = 0, id_nzblock = 0;
0711 //      for(InputIterator it(begin); it!=end; ++it)
0712 //      {
0713 //        while (id<bcol) // one pass should do the job unless there are empty columns
0714 //        {
0715 //          id++;
0716 //          m_outerIndex[id+1]=m_outerIndex[id];
0717 //        }
0718 //        m_outerIndex[id+1] += 1;
0719 //        m_indices[id_nzblock]=brow;
0720 //        Index block_size = it->value().rows()*it->value().cols();
0721 //        m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size;
0722 //        id_nzblock++;
0723 //        memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar));
0724 //        id_nz += block_size;
0725 //      }
0726 //      while(id < m_outerBSize-1) // Empty columns at the end
0727 //      {
0728 //        id++;
0729 //        m_outerIndex[id+1]=m_outerIndex[id];
0730 //      }
0731 //      }
0732     }
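     /* A sketch of setFromTriplets() with variable-size blocks; the block sizes are
      * deduced from the dense blocks carried by the triplets, and
      * Eigen::Triplet<Eigen::MatrixXd> is assumed as the triplet type:
      * \code
      * typedef Eigen::Triplet<Eigen::MatrixXd> BlockTriplet;
      * std::vector<BlockTriplet> triplets;
      * triplets.push_back(BlockTriplet(0, 0, Eigen::MatrixXd::Identity(2, 2)));
      * triplets.push_back(BlockTriplet(1, 0, Eigen::MatrixXd::Zero(3, 2)));
      * triplets.push_back(BlockTriplet(1, 1, Eigen::MatrixXd::Ones(3, 3)));
      * BlockSparseMatrix<double> bmat(2, 2);      // block sizes deduced at runtime
      * bmat.setFromTriplets(triplets.begin(), triplets.end());
      * \endcode
      */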
0733 
0734 
0735     /**
0736       * \returns the number of rows
0737       */
0738     inline Index rows() const
0739     {
0740 //      return blockRows();
0741       return (IsColMajor ? innerSize() : outerSize());
0742     }
0743 
0744     /**
0745       * \returns the number of cols
0746       */
0747     inline Index cols() const
0748     {
0749 //      return blockCols();
0750       return (IsColMajor ? outerSize() : innerSize());
0751     }
0752 
0753     inline Index innerSize() const
0754     {
0755       if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
0756       else return  (m_innerBSize * m_blockSize) ;
0757     }
0758 
0759     inline Index outerSize() const
0760     {
0761       if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
0762       else return  (m_outerBSize * m_blockSize) ;
0763     }
0764     /** \returns the number of rows grouped by blocks */
0765     inline Index blockRows() const
0766     {
0767       return (IsColMajor ? m_innerBSize : m_outerBSize);
0768     }
0769     /** \returns the number of columns grouped by blocks */
0770     inline Index blockCols() const
0771     {
0772       return (IsColMajor ? m_outerBSize : m_innerBSize);
0773     }
0774 
0775     inline Index outerBlocks() const { return m_outerBSize; }
0776     inline Index innerBlocks() const { return m_innerBSize; }
0777 
0778     /** \returns the index of the block to which \p outer belongs */
0779     inline Index outerToBlock(Index outer) const
0780     {
0781       eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");
0782 
0783       if(m_blockSize != Dynamic)
0784         return (outer / m_blockSize); // Integer division
0785 
0786       StorageIndex b_outer = 0;
0787       while(m_outerOffset[b_outer] <= outer) ++b_outer;
0788       return b_outer - 1;
0789     }
0790     /** \returns the index of the block to which \p inner belongs */
0791     inline Index innerToBlock(Index inner) const
0792     {
0793       eigen_assert(inner < innerSize() && "INNER INDEX OUT OF BOUNDS");
0794 
0795       if(m_blockSize != Dynamic)
0796         return (inner / m_blockSize); // Integer division
0797 
0798       StorageIndex b_inner = 0;
0799       while(m_innerOffset[b_inner] <= inner) ++b_inner;
0800       return b_inner - 1;
0801     }
0802 
0803     /**
0804       *\returns a reference to the (i,j) block as an Eigen Dense Matrix
0805       */
0806     Ref<BlockScalar> coeffRef(Index brow, Index bcol)
0807     {
0808       eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
0809       eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");
0810 
0811       StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
0812       StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
0813       StorageIndex inner = IsColMajor ? brow : bcol;
0814       StorageIndex outer = IsColMajor ? bcol : brow;
0815       StorageIndex offset = m_outerIndex[outer];
0816       while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
0817         offset++;
0818       if(m_indices[offset] == inner)
0819       {
0820         return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
0821       }
0822       else
0823       {
0824         //FIXME the block does not exist, Insert it !!!!!!!!!
0825         eigen_assert("DYNAMIC INSERTION IS NOT YET SUPPORTED");
0826       }
0827     }
0828 
0829     /**
0830       * \returns the value of the (i,j) block as an Eigen Dense Matrix
0831       */
0832     Map<const BlockScalar> coeff(Index brow, Index bcol) const
0833     {
0834       eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
0835       eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");
0836 
0837       StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
0838       StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
0839       StorageIndex inner = IsColMajor ? brow : bcol;
0840       StorageIndex outer = IsColMajor ? bcol : brow;
0841       StorageIndex offset = m_outerIndex[outer];
0842       while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
0843       if(m_indices[offset] == inner)
0844       {
0845         return Map<const BlockScalar> (&(m_values[blockPtr(offset)]), rsize, csize);
0846       }
0847       else
0848 //        return BlockScalar::Zero(rsize, csize);
0849         eigen_assert("NOT YET SUPPORTED");
0850     }
0851 
0852     // Block Matrix times vector product
0853     template<typename VecType>
0854     BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& rhs) const
0855     {
0856       return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, rhs);
0857     }
0858 
0859     /** \returns the number of nonzero blocks */
0860     inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
0861     /** \returns the total number of nonzero elements, including any explicit zeros stored inside the blocks */
0862     inline Index nonZeros() const { return m_nonzeros; }
0863 
0864     inline BlockScalarReturnType *valuePtr() {return static_cast<BlockScalarReturnType *>(m_values);}
0865 //    inline Scalar *valuePtr(){ return m_values; }
0866     inline StorageIndex *innerIndexPtr() {return m_indices; }
0867     inline const StorageIndex *innerIndexPtr() const {return m_indices; }
0868     inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
0869     inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }
0870 
0871     /** \brief for compatibility purposes with the SparseMatrix class */
0872     inline bool isCompressed() const {return true;}
0873     /**
0874       * \returns the starting index of the bi row block
0875       */
0876     inline Index blockRowsIndex(Index bi) const
0877     {
0878       return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
0879     }
0880 
0881     /**
0882       * \returns the starting index of the bj col block
0883       */
0884     inline Index blockColsIndex(Index bj) const
0885     {
0886       return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
0887     }
0888 
0889     inline Index blockOuterIndex(Index bj) const
0890     {
0891       return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
0892     }
0893     inline Index blockInnerIndex(Index bi) const
0894     {
0895       return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
0896     }
0897 
0898     // Not needed ???
0899     inline Index blockInnerSize(Index bi) const
0900     {
0901       return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
0902     }
0903     inline Index blockOuterSize(Index bj) const
0904     {
0905       return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize;
0906     }
0907 
0908     /**
0909       * \brief Browse the matrix by outer index
0910       */
0911     class InnerIterator; // Browse column by column
0912 
0913     /**
0914       * \brief Browse the matrix by block outer index
0915       */
0916     class BlockInnerIterator; // Browse block by block
0917 
0918     friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
0919     {
0920       for (StorageIndex j = 0; j < m.outerBlocks(); ++j)
0921       {
0922         BlockInnerIterator itb(m, j);
0923         for(; itb; ++itb)
0924         {
0925           s << "("<<itb.row() << ", " << itb.col() << ")\n";
0926           s << itb.value() <<"\n";
0927         }
0928       }
0929       s << std::endl;
0930       return s;
0931     }
0932 
0933     /**
0934       * \returns the starting position of the block \p id in the array of values
0935       */
0936     Index blockPtr(Index id) const
0937     {
0938       if(m_blockSize == Dynamic) return m_blockPtr[id];
0939       else return id * m_blockSize * m_blockSize;
0940       //return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type());
0941     }
0942 
0943 
0944   protected:
0945 //    inline Index blockDynIdx(Index id, internal::true_type) const
0946 //    {
0947 //      return m_blockPtr[id];
0948 //    }
0949 //    inline Index blockDynIdx(Index id, internal::false_type) const
0950 //    {
0951 //      return id * BlockSize * BlockSize;
0952 //    }
0953 
0954     // To be implemented
0955     // Insert a block at a particular location... need to make a room for that
0956     Map<BlockScalar> insert(Index brow, Index bcol);
0957 
0958     Index m_innerBSize; // Number of block rows
0959     Index m_outerBSize; // Number of block columns
0960     StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1)
0961     StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1)
0962     Index m_nonzerosblocks; // Total number of nonzero blocks (at most m_innerBSize x m_outerBSize)
0963     Index m_nonzeros; // Total number of nonzero elements
0964     Scalar *m_values; // Values stored block column after block column (size m_nonzeros)
0965     StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values (size m_nonzerosblocks+1) ... null for fixed-size blocks
0966     StorageIndex *m_indices; // Inner block indices (size m_nonzerosblocks)
0967     StorageIndex *m_outerIndex; // Starting position of each outer block in m_indices (size m_outerBSize+1)
0968     Index m_blockSize; // Size of a block for fixed-size blocks, otherwise Dynamic
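     /* Illustration of the storage scheme (hedged, for a column-major matrix with
      * variable-size blocks): given the nonzero pattern { (0,0) of size 2x2, (1,1) of
      * size 3x3 }, the internal arrays would look like
      *   m_outerIndex = [0, 1, 2]    // one nonzero block per block column
      *   m_indices    = [0, 1]       // block row index of each nonzero block
      *   m_blockPtr   = [0, 4, 13]   // start of each block in m_values (variable-size blocks only)
      *   m_values     = the 4 + 9 coefficients, stored block after block
      */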
0969 };
0970 
0971 template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
0972 class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
0973 {
0974   public:
0975 
0976     enum{
0977       Flags = _Options
0978     };
0979 
0980     BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
0981     : m_mat(mat),m_outer(outer),
0982       m_id(mat.m_outerIndex[outer]),
0983       m_end(mat.m_outerIndex[outer+1])
0984     {
0985     }
0986 
0987     inline BlockInnerIterator& operator++() {m_id++; return *this; }
0988 
0989     inline const Map<const BlockScalar> value() const
0990     {
0991       return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
0992           rows(),cols());
0993     }
0994     inline Map<BlockScalar> valueRef()
0995     {
0996       return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
0997           rows(),cols());
0998     }
0999     // Block inner index
1000     inline Index index() const {return m_mat.m_indices[m_id]; }
1001     inline Index outer() const { return m_outer; }
1002     // block row index
1003     inline Index row() const  {return index(); }
1004     // block column index
1005     inline Index col() const {return outer(); }
1006     // FIXME Number of rows in the current block
1007     inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
1008     // Number of columns in the current block ...
1009     inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;}
1010     inline operator bool() const { return (m_id < m_end); }
1011 
1012   protected:
1013     const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
1014     const Index m_outer;
1015     Index m_id;
1016     Index m_end;
1017 };
1018 
1019 template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
1020 class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
1021 {
1022   public:
1023     InnerIterator(const BlockSparseMatrix& mat, Index outer)
1024     : m_mat(mat),m_outerB(mat.outerToBlock(outer)),m_outer(outer),
1025       itb(mat, mat.outerToBlock(outer)),
1026       m_offset(outer - mat.blockOuterIndex(m_outerB))
1027      {
1028         if (itb)
1029         {
1030           m_id = m_mat.blockInnerIndex(itb.index());
1031           m_start = m_id;
1032           m_end = m_mat.blockInnerIndex(itb.index()+1);
1033         }
1034      }
1035     inline InnerIterator& operator++()
1036     {
1037       m_id++;
1038       if (m_id >= m_end)
1039       {
1040         ++itb;
1041         if (itb)
1042         {
1043           m_id = m_mat.blockInnerIndex(itb.index());
1044           m_start = m_id;
1045           m_end = m_mat.blockInnerIndex(itb.index()+1);
1046         }
1047       }
1048       return *this;
1049     }
1050     inline Scalar value() const
1051     {
1052       return itb.value().coeff(m_id - m_start, m_offset);
1053     }
1054     inline Scalar& valueRef()
1055     {
1056       return itb.valueRef().coeffRef(m_id - m_start, m_offset);
1057     }
1058     inline Index index() const { return m_id; }
1059     inline Index outer() const {return m_outer; }
1060     inline Index col() const {return outer(); }
1061     inline Index row() const { return index();}
1062     inline operator bool() const
1063     {
1064       return itb;
1065     }
1066   protected:
1067     const BlockSparseMatrix& m_mat;
1068     const Index m_outer;
1069     const Index m_outerB;
1070     BlockInnerIterator itb; // Iterator through the blocks
1071     const Index m_offset; // Position of this column in the block
1072     Index m_start; // starting inner index of this block
1073     Index m_id; // current inner index in the block
1074     Index m_end; // starting inner index of the next block
1075 
1076 };
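     /* An iteration sketch: BlockInnerIterator visits whole blocks while InnerIterator
      * visits individual scalar coefficients, mirroring the SparseMatrix iteration
      * pattern. bmat is assumed to be an already filled BlockSparseMatrix<double>.
      * \code
      * for (int bj = 0; bj < bmat.outerBlocks(); ++bj)
      *   for (BlockSparseMatrix<double>::BlockInnerIterator itb(bmat, bj); itb; ++itb)
      *     std::cout << "block (" << itb.row() << "," << itb.col() << ")\n" << itb.value() << "\n";
      * for (int j = 0; j < bmat.outerSize(); ++j)
      *   for (BlockSparseMatrix<double>::InnerIterator it(bmat, j); it; ++it)
      *     std::cout << it.row() << " " << it.col() << " " << it.value() << "\n";
      * \endcode
      */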
1077 } // end namespace Eigen
1078 
1079 #endif // EIGEN_SPARSEBLOCKMATRIX_H