Back to home page

EIC code displayed by LXR

 
 

    


Warning: file /include/eigen3/Eigen/src/SparseCore/SparseBlock.h was not indexed, or was modified since the last indexation (in which case cross-reference links may be missing, inaccurate, or erroneous).

0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 //
0004 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
0005 //
0006 // This Source Code Form is subject to the terms of the Mozilla
0007 // Public License v. 2.0. If a copy of the MPL was not distributed
0008 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0009 
0010 #ifndef EIGEN_SPARSE_BLOCK_H
0011 #define EIGEN_SPARSE_BLOCK_H
0012 
0013 namespace Eigen {
0014 
0015 // Subset of columns or rows
0016 template<typename XprType, int BlockRows, int BlockCols>
0017 class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
0018   : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
0019 {
0020     typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
0021     typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
0022 public:
0023     enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
0024 protected:
0025     enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
0026     typedef SparseMatrixBase<BlockType> Base;
0027     using Base::convert_index;
0028 public:
0029     EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
0030 
0031     inline BlockImpl(XprType& xpr, Index i)
0032       : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
0033     {}
0034 
0035     inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
0036       : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
0037     {}
0038 
0039     EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
0040     EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
0041 
0042     Index nonZeros() const
0043     {
0044       typedef internal::evaluator<XprType> EvaluatorType;
0045       EvaluatorType matEval(m_matrix);
0046       Index nnz = 0;
0047       Index end = m_outerStart + m_outerSize.value();
0048       for(Index j=m_outerStart; j<end; ++j)
0049         for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
0050           ++nnz;
0051       return nnz;
0052     }
0053 
0054     inline const Scalar coeff(Index row, Index col) const
0055     {
0056       return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
0057     }
0058 
0059     inline const Scalar coeff(Index index) const
0060     {
0061       return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
0062     }
0063 
0064     inline const XprType& nestedExpression() const { return m_matrix; }
0065     inline XprType& nestedExpression() { return m_matrix; }
0066     Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
0067     Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
0068     Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
0069     Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
0070 
0071   protected:
0072 
0073     typename internal::ref_selector<XprType>::non_const_type m_matrix;
0074     Index m_outerStart;
0075     const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
0076 
0077   protected:
0078     // Disable assignment with clear error message.
0079     // Note that simply removing operator= yields compilation errors with ICC+MSVC
0080     template<typename T>
0081     BlockImpl& operator=(const T&)
0082     {
0083       EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
0084       return *this;
0085     }
0086 };
0087 
0088 
0089 /***************************************************************************
0090 * specialization for SparseMatrix
0091 ***************************************************************************/
0092 
0093 namespace internal {
0094 
// Writable inner-panel block of an actual SparseMatrix.
// Because the block maps directly onto a contiguous slice of the matrix's
// compressed storage, it exposes raw value/index pointers and supports
// assignment by splicing the new values into the underlying arrays.
template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
    typedef typename Base::IndexVector IndexVector;
    // Compile-time outer dimension of the block (Dynamic if unknown).
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:

    // Single outer-vector block (i-th row or column depending on storage order).
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    // General block; only outer start/size matter since whole inner vectors are covered.
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    // Assign a sparse expression to this block by splicing its nonzeros into
    // the underlying matrix's storage, shifting the trailing data as needed.
    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz           = tmp.nonZeros();
      Index start         = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
      Index block_size    = end - start;                                                // available room in the current block
      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;

      // Room usable without reallocating: the block's current slot, plus (in
      // compressed mode) extra allocated capacity reported by the storage.
      Index free_size     = m_matrix.isCompressed()
                          ? Index(matrix.data().allocatedSize()) + block_size
                          : block_size;

      Index tmp_start = tmp.outerIndexPtr()[0];

      bool update_trailing_pointers = false;
      if(nnz>free_size)
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        // copy the part before the block unchanged
        internal::smart_copy(m_matrix.valuePtr(),       m_matrix.valuePtr() + start,      newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(),  m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        // insert the new block's nonzeros
        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,       newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz,  newdata.indexPtr() + start);

        // copy the tail (everything after the block) at its shifted position
        internal::smart_copy(matrix.valuePtr()+end,       matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end,  matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);

        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed() && nnz!=block_size)
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        // overwrite the block's slot in place
        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,       matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz,  matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
      }
      else
      {
        // rebuild the outer index entries of the covered outer vectors from
        // the per-vector nonzero counts of the temporary
        StorageIndex p = StorageIndex(start);
        for(Index k=0; k<m_outerSize.value(); ++k)
        {
          StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
          if(!m_matrix.isCompressed())
            matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
          matrix.outerIndexPtr()[m_outerStart+k] = p;
          p += nnz_k;
        }
      }

      // if the tail moved, shift every outer index pointer after the block
      if(update_trailing_pointers)
      {
        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
        {
          matrix.outerIndexPtr()[k] += offset;
        }
      }

      return derived();
    }

    // Copy-assignment: forward to the generic sparse assignment above.
    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }

    // Raw storage accessors. Values and inner indices share the underlying
    // matrix's arrays; outer/innerNonZero pointers are offset by m_outerStart.
    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }

    // Null when the matrix is compressed (no innerNonZero array in that mode).
    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }

    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
    }

    // Last stored coefficient of this (vector-only) block; requires at least
    // one nonzero. The compressed/uncompressed cases read the end position
    // from outerIndexPtr or innerNonZeroPtr respectively.
    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
      eigen_assert(Base::nonZeros()>0);
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
    inline SparseMatrixType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

};
0283 
0284 } // namespace internal
0285 
// Specialization of the inner-panel BlockImpl for a (mutable) SparseMatrix:
// simply forwards construction to sparse_matrix_block_impl, which provides
// direct storage access and writable assignment.
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
  typedef _StorageIndex StorageIndex;
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
  // Single outer-vector block.
  inline BlockImpl(SparseMatrixType& xpr, Index i)
    : Base(xpr, i)
  {}

  // General inner-panel block.
  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
    : Base(xpr, startRow, startCol, blockRows, blockCols)
  {}

  // Expose the base class's splicing assignment operators.
  using Base::operator=;
};
0304 
// Specialization of the inner-panel BlockImpl for a const SparseMatrix.
// Same storage-level access as the mutable variant, but construction from
// arbitrary sparse expressions is explicitly disabled (private, undefined).
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
  typedef _StorageIndex StorageIndex;
  typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
  // Single outer-vector block.
  inline BlockImpl(SparseMatrixType& xpr, Index i)
    : Base(xpr, i)
  {}

  // General inner-panel block.
  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
    : Base(xpr, startRow, startCol, blockRows, blockCols)
  {}

  // NOTE(review): operator= is inherited here as in the non-const variant;
  // presumably instantiating it on a const matrix fails to compile — confirm.
  using Base::operator=;
private:
  // Deliberately undefined: forbid building this block from a generic
  // sparse expression (only a true const SparseMatrix is accepted).
  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};
0326 
0327 //----------
0328 
0329 /** Generic implementation of sparse Block expression.
0330   * Real-only.
0331   */
0332 template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
0333 class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
0334   : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
0335 {
0336     typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
0337     typedef SparseMatrixBase<BlockType> Base;
0338     using Base::convert_index;
0339 public:
0340     enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
0341     EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
0342 
0343     typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
0344 
0345     /** Column or Row constructor
0346       */
0347     inline BlockImpl(XprType& xpr, Index i)
0348       : m_matrix(xpr),
0349         m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
0350         m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
0351         m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
0352         m_blockCols(BlockCols==1 ? 1 : xpr.cols())
0353     {}
0354 
0355     /** Dynamic-size constructor
0356       */
0357     inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
0358       : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
0359     {}
0360 
0361     inline Index rows() const { return m_blockRows.value(); }
0362     inline Index cols() const { return m_blockCols.value(); }
0363 
0364     inline Scalar& coeffRef(Index row, Index col)
0365     {
0366       return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
0367     }
0368 
0369     inline const Scalar coeff(Index row, Index col) const
0370     {
0371       return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
0372     }
0373 
0374     inline Scalar& coeffRef(Index index)
0375     {
0376       return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
0377                                m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
0378     }
0379 
0380     inline const Scalar coeff(Index index) const
0381     {
0382       return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
0383                             m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
0384     }
0385 
0386     inline const XprType& nestedExpression() const { return m_matrix; }
0387     inline XprType& nestedExpression() { return m_matrix; }
0388     Index startRow() const { return m_startRow.value(); }
0389     Index startCol() const { return m_startCol.value(); }
0390     Index blockRows() const { return m_blockRows.value(); }
0391     Index blockCols() const { return m_blockCols.value(); }
0392 
0393   protected:
0394 //     friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
0395     friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
0396 
0397     Index nonZeros() const { return Dynamic; }
0398 
0399     typename internal::ref_selector<XprType>::non_const_type m_matrix;
0400     const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
0401     const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
0402     const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
0403     const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
0404 
0405   protected:
0406     // Disable assignment with clear error message.
0407     // Note that simply removing operator= yields compilation errors with ICC+MSVC
0408     template<typename T>
0409     BlockImpl& operator=(const T&)
0410     {
0411       EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
0412       return *this;
0413     }
0414 
0415 };
0416 
0417 namespace internal {
0418 
// Evaluator for a generic sparse Block expression. Depending on whether the
// block degenerates to a single inner vector or cuts across outer vectors,
// InnerIterator is aliased to one of two iterator implementations below.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
 : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;

    enum {
      IsRowMajor = XprType::IsRowMajor,

      // True when the block is a single column of a row-major expression or a
      // single row of a column-major one, i.e. it runs along the OUTER dimension.
      OuterVector =  (BlockCols==1 && ArgType::IsRowMajor)
                    | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                      // revert to || as soon as not needed anymore.
                     (BlockRows==1 && !ArgType::IsRowMajor),

      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;

    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}

    // Exact nonzero count when the block provides one; otherwise (negative,
    // i.e. Dynamic) fall back to a size-proportional estimate.
    inline Index nonZerosEstimate() const {
      const Index nnz = m_block.nonZeros();
      if(nnz < 0) {
        // Scale the non-zero estimate for the underlying expression linearly with block size.
        // Return zero if the underlying block is empty.
        const Index nested_sz = m_block.nestedExpression().size();
        return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
      }
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};
0465 
0466 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
0467 class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
0468  : public EvalIterator
0469 {
0470   // NOTE MSVC fails to compile if we don't explicitely "import" IsRowMajor from unary_evaluator
0471   //      because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
0472   // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
0473   enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
0474   const XprType& m_block;
0475   Index m_end;
0476 public:
0477 
0478   EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
0479     : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
0480       m_block(aEval.m_block),
0481       m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
0482   {
0483     while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
0484       EvalIterator::operator++();
0485   }
0486 
0487   inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
0488   inline Index outer()  const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
0489   inline Index row()    const { return EvalIterator::row()   - m_block.startRow(); }
0490   inline Index col()    const { return EvalIterator::col()   - m_block.startCol(); }
0491 
0492   inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
0493 };
0494 
// Iterator for a block that runs along the OUTER dimension of the nested
// expression (e.g. one column of a row-major matrix): for each outer vector
// in the block's range, it scans for the single entry at the fixed inner
// index m_innerIndex, restarting the wrapped iterator per outer vector.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
  // NOTE see above
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const unary_evaluator& m_eval;
  Index m_outerPos;           // current outer vector being inspected
  const Index m_innerIndex;   // fixed inner coordinate the block selects
  Index m_end;                // one-past-last outer position
  EvalIterator m_it;          // iterator over the current outer vector
public:

  EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : m_eval(aEval),
      m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
      m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
      m_it(m_eval.m_argImpl, m_outerPos)
  {
    // the block is a single inner vector from this iterator's point of view
    EIGEN_UNUSED_VARIABLE(outer);
    eigen_assert(outer==0);

    // look for m_innerIndex in the first outer vector; if absent, advance
    // to the first outer vector that does contain it
    while(m_it && m_it.index() < m_innerIndex) ++m_it;
    if((!m_it) || (m_it.index()!=m_innerIndex))
      ++(*this);
  }

  // Position relative to the block's start along the outer dimension.
  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
  inline Index outer()  const { return 0; }
  inline Index row()    const { return XprIsRowMajor ? 0 : index(); }
  inline Index col()    const { return XprIsRowMajor ? index() : 0; }

  inline Scalar value() const { return m_it.value(); }
  inline Scalar& valueRef() { return m_it.valueRef(); }

  inline OuterVectorInnerIterator& operator++()
  {
    // search next non-zero entry
    while(++m_outerPos<m_end)
    {
      // Restart iterator at the next inner-vector:
      // (EvalIterator may not be assignable, hence destroy + placement-new)
      m_it.~EvalIterator();
      ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
      // search for the key m_innerIndex in the current outer-vector
      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if(m_it && m_it.index()==m_innerIndex) break;
    }
    return *this;
  }

  // Valid as long as an outer vector containing m_innerIndex was found.
  inline operator bool() const { return m_outerPos < m_end; }
};
0547 
// Inner-panel blocks of an actual SparseMatrix expose compressed storage
// directly, so their evaluator is just the SparseCompressedBase evaluator.
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
0556 
// Same as above for inner-panel blocks of a const SparseMatrix.
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
0565 
0566 } // end namespace internal
0567 
0568 
0569 } // end namespace Eigen
0570 
0571 #endif // EIGEN_SPARSE_BLOCK_H