#ifndef EIGEN_SPARSEBLOCKMATRIX_H
#define EIGEN_SPARSEBLOCKMATRIX_H

namespace Eigen {
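
/** \class BlockSparseMatrix
  *
  * \brief A sparse matrix representation where the nonzero coefficients are grouped into dense blocks.
  *
  * The blocks either all share one size (fixed at compile time through \c _BlockAtCompileTime,
  * or at run time through setBlockSize()), or have per block-row/block-column sizes described
  * with setBlockLayout(). Only the block-level structure is stored in compressed form:
  * \c m_outerIndex and \c m_indices address the nonzero blocks, while \c m_values stores the
  * dense content of every stored block contiguously.
  *
  * A minimal usage sketch (illustrative only, not taken from the original documentation;
  * it assumes a scalar SparseMatrix \c spmat that is already filled and whose dimensions
  * are multiples of the block size):
  * \code
  * BlockSparseMatrix<double, 3> A(spmat.rows()/3, spmat.cols()/3); // 3x3 blocks
  * A = spmat;                          // deduce the block structure and copy the values
  * std::cout << A.nonZerosBlocks() << " nonzero blocks\n";
  * \endcode
  */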

template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;

template<typename BlockSparseMatrixT> class BlockSparseMatrixView;

namespace internal {
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    BlockSize = _BlockAtCompileTime,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename BlockSparseMatrixT>
struct traits<BlockSparseMatrixView<BlockSparseMatrixT> >
{
  typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
  typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;
};

template<typename Iterator, bool IsColMajor>
struct TripletComp
{
  typedef typename Iterator::value_type Triplet;
  bool operator()(const Triplet& a, const Triplet& b)
  {
    if(IsColMajor)
      return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
    else
      return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
  }
};
} // end namespace internal

// Lightweight view that exposes a BlockSparseMatrix as a sparse matrix whose
// coefficients are the blocks themselves (used by the matrix-vector product below).
template<typename BlockSparseMatrixT>
class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT>
{
  public:
    typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
    typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
    typedef typename BlockSparseMatrixT::Index Index;
    typedef BlockSparseMatrixT Nested;
    enum {
      Flags = BlockSparseMatrixT::Options,
      Options = BlockSparseMatrixT::Options,
      RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
      ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
      MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
      MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
    };
  public:
    BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat)
      : m_spblockmat(spblockmat)
    {}

    Index outerSize() const
    {
      return (Flags&RowMajorBit) ? this->rows() : this->cols();
    }
    Index cols() const
    {
      return m_spblockmat.blockCols();
    }
    Index rows() const
    {
      return m_spblockmat.blockRows();
    }
    Scalar coeff(Index row, Index col)
    {
      return m_spblockmat.coeff(row, col);
    }
    Scalar coeffRef(Index row, Index col)
    {
      return m_spblockmat.coeffRef(row, col);
    }

    // Iterates over the nonzero blocks of one outer block-vector.
    class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator
    {
      public:
        InnerIterator(const BlockSparseMatrixView& mat, Index outer)
          : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer)
        {}
    };

  protected:
    const BlockSparseMatrixT& m_spblockmat;
};

// Wraps a dense vector (or multi-vector) so that it can be read block-row by block-row,
// following the block layout of the given BlockSparseMatrix.
template<typename BlockSparseMatrixT, typename VectorType>
class BlockVectorView
{
  public:
    enum {
      BlockSize = BlockSparseMatrixT::BlockSize,
      ColsAtCompileTime = VectorType::ColsAtCompileTime,
      RowsAtCompileTime = VectorType::RowsAtCompileTime,
      Flags = VectorType::Flags
    };
    typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime==1)? 1 : BlockSize, (ColsAtCompileTime==1)? 1 : BlockSize> > Scalar;
    typedef typename BlockSparseMatrixT::Index Index;
  public:
    BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
      : m_spblockmat(spblockmat), m_vec(vec)
    { }
    inline Index cols() const
    {
      return m_vec.cols();
    }
    inline Index size() const
    {
      return m_spblockmat.blockRows();
    }
    inline Scalar coeff(Index bi) const
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.middleRows(startRow, rowSize);
    }
    inline Scalar coeff(Index bi, Index j) const
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.block(startRow, j, rowSize, 1);
    }
  protected:
    const BlockSparseMatrixT& m_spblockmat;
    const VectorType& m_vec;
};

template<typename VectorType, typename Index> class BlockVectorReturn;

// Writable counterpart of BlockVectorView: gives per-block write access into the
// destination vector of the product.
template<typename BlockSparseMatrixT, typename VectorType>
class BlockVectorReturn
{
  public:
    enum {
      ColsAtCompileTime = VectorType::ColsAtCompileTime,
      RowsAtCompileTime = VectorType::RowsAtCompileTime,
      Flags = VectorType::Flags
    };
    typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
    typedef typename BlockSparseMatrixT::Index Index;
  public:
    BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec)
      : m_spblockmat(spblockmat), m_vec(vec)
    { }
    inline Index size() const
    {
      return m_spblockmat.blockRows();
    }
    inline Scalar coeffRef(Index bi)
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.middleRows(startRow, rowSize);
    }
    inline Scalar coeffRef(Index bi, Index j)
    {
      Index startRow = m_spblockmat.blockRowsIndex(bi);
      Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
      return m_vec.block(startRow, j, rowSize, 1);
    }

  protected:
    const BlockSparseMatrixT& m_spblockmat;
    VectorType& m_vec;
};

template<typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct;

namespace internal {

template<typename BlockSparseMatrixT, typename VecType>
struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> >
{
  typedef Dense StorageKind;
  typedef MatrixXpr XprKind;
  typedef typename BlockSparseMatrixT::Scalar Scalar;
  typedef typename BlockSparseMatrixT::Index Index;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = 0,
    CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
  };
};
} // end namespace internal

// Expression of a block-sparse matrix times a dense vector/matrix; the evaluation is
// delegated to internal::sparse_time_dense_product applied to the block-level views.
template<typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct
  : public ProductBase<BlockSparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)

    BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const
    {
      BlockVectorReturn<Lhs,Dest> tmpDest(m_lhs, dest);
      internal::sparse_time_dense_product( BlockSparseMatrixView<Lhs>(m_lhs), BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs), tmpDest, alpha);
    }

  private:
    BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef _StorageIndex StorageIndex;
    typedef typename internal::ref_selector<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;

    enum {
      Options = _Options,
      Flags = Options,
      BlockSize = _BlockAtCompileTime,
      RowsAtCompileTime = Dynamic,
      ColsAtCompileTime = Dynamic,
      MaxRowsAtCompileTime = Dynamic,
      MaxColsAtCompileTime = Dynamic,
      IsVectorAtCompileTime = 0,
      IsColMajor = Flags&RowMajorBit ? 0 : 1
    };
    typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime, IsColMajor ? ColMajor : RowMajor> BlockScalar;
    typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime, IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
    typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
    typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
  public:

    // Default constructor: an empty matrix with no allocated storage.
    BlockSparseMatrix()
      : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0),
        m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0),
        m_outerIndex(0),m_blockSize(BlockSize)
    { }

    // Construct a matrix with \a brow block-rows and \a bcol block-columns.
    // The block sizes and the nonzero structure still have to be provided,
    // e.g. through setBlockSize()/setBlockLayout() and setFromTriplets().
    BlockSparseMatrix(Index brow, Index bcol)
      : m_innerBSize(IsColMajor ? brow : bcol),
        m_outerBSize(IsColMajor ? bcol : brow),
        m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
        m_values(0),m_blockPtr(0),m_indices(0),
        m_outerIndex(0),m_blockSize(BlockSize)
    { }

    // Deep copy: allocate the internal arrays and copy them from \a other.
    // The offset and block-pointer arrays only exist for variable-size blocks.
    BlockSparseMatrix(const BlockSparseMatrix& other)
      : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize),
        m_innerOffset(0),m_outerOffset(0),
        m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros),
        m_values(0),m_blockPtr(0),m_indices(0),m_outerIndex(0),
        m_blockSize(other.m_blockSize)
    {
      eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");

      if(m_blockSize == Dynamic)
      {
        m_innerOffset = new StorageIndex[m_innerBSize+1];
        m_outerOffset = new StorageIndex[m_outerBSize+1];
        m_blockPtr    = new StorageIndex[m_nonzerosblocks+1];
        std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset);
        std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset);
        std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks+1, m_blockPtr);
      }

      m_values     = new Scalar[m_nonzeros];
      m_indices    = new StorageIndex[m_nonzerosblocks+1];
      m_outerIndex = new StorageIndex[m_outerBSize+1];
      std::copy(other.m_values, other.m_values+m_nonzeros, m_values);
      std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices);
      std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize+1, m_outerIndex);
    }

    friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second)
    {
      std::swap(first.m_innerBSize, second.m_innerBSize);
      std::swap(first.m_outerBSize, second.m_outerBSize);
      std::swap(first.m_innerOffset, second.m_innerOffset);
      std::swap(first.m_outerOffset, second.m_outerOffset);
      std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
      std::swap(first.m_nonzeros, second.m_nonzeros);
      std::swap(first.m_values, second.m_values);
      std::swap(first.m_blockPtr, second.m_blockPtr);
      std::swap(first.m_indices, second.m_indices);
      std::swap(first.m_outerIndex, second.m_outerIndex);
      std::swap(first.m_blockSize, second.m_blockSize);
    }

    // Copy-and-swap assignment: \a other is taken by value, so swapping with it leaves
    // *this with the copied data and lets the temporary clean up the old storage.
    BlockSparseMatrix& operator=(BlockSparseMatrix other)
    {
      swap(*this, other);
      return *this;
    }

    ~BlockSparseMatrix()
    {
      delete[] m_outerIndex;
      delete[] m_innerOffset;
      delete[] m_outerOffset;
      delete[] m_indices;
      delete[] m_blockPtr;
      delete[] m_values;
    }

    // Construct directly from a scalar sparse matrix. Only available for a fixed
    // block size, which must divide the dimensions of \a spmat.
    template<typename MatrixType>
    inline BlockSparseMatrix(const MatrixType& spmat)
      : m_innerBSize(spmat.innerSize()/BlockSize),
        m_outerBSize(spmat.outerSize()/BlockSize),
        m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
        m_values(0),m_blockPtr(0),m_indices(0),
        m_outerIndex(0),m_blockSize(BlockSize)
    {
      EIGEN_STATIC_ASSERT((BlockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);
      eigen_assert((spmat.innerSize() % BlockSize == 0) && (spmat.outerSize() % BlockSize == 0)
                   && "THE BLOCK SIZE MUST DIVIDE THE MATRIX SIZES");

      *this = spmat;
    }

    // Assign from a scalar sparse matrix with the same storage order. The block structure
    // is deduced in a first pass over \a spmat, then the scalar values are copied block
    // by block in a second pass.
    template<typename MatrixType>
    inline BlockSparseMatrix& operator=(const MatrixType& spmat)
    {
      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
                   && "Trying to assign to a zero-size matrix, call resize() first");
      eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
      typedef SparseMatrix<bool,MatrixType::Options,typename MatrixType::Index> MatrixPatternType;
      MatrixPatternType blockPattern(blockRows(), blockCols());
      m_nonzeros = 0;

      // First pass: collect the block-level sparsity pattern and count the stored scalars.
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Flags the block-rows that already have a nonzero in this block-column.
        std::vector<bool> nzblocksFlag(m_innerBSize,false);
        blockPattern.startVec(bj);
        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
        {
          typename MatrixType::InnerIterator it_spmat(spmat, j);
          for(; it_spmat; ++it_spmat)
          {
            StorageIndex bi = innerToBlock(it_spmat.index());
            if(!nzblocksFlag[bi])
            {
              // First nonzero hitting this block: record it in the pattern.
              nzblocksFlag[bi] = true;
              blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
              m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
            }
          }
        }
      }
      blockPattern.finalize();

      // Allocate the internal arrays according to the collected pattern.
      setBlockStructure(blockPattern);

      // Second pass: copy the scalar values into the dense storage of each block.
      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
        {
          typename MatrixType::InnerIterator it_spmat(spmat, j);
          for(; it_spmat; ++it_spmat)
          {
            StorageIndex idx = 0;
            StorageIndex bi = innerToBlock(it_spmat.index());
            // Position of the block (bi,bj) among the stored blocks of this block-column.
            while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx;
            // Position of the scalar inside m_values.
            StorageIndex idxVal;
            if(m_blockSize == Dynamic)
            {
              idxVal = m_blockPtr[m_outerIndex[bj]+idx];
              // Inside a block, the outer (column/row) stride is the inner size of the block.
              idxVal += (j - blockOuterIndex(bj)) * blockInnerSize(bi) + it_spmat.index() - m_innerOffset[bi];
            }
            else
            {
              idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
              idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
            }
            m_values[idxVal] = it_spmat.value();
          }
        }
      }

      return *this;
    }

    // Allocate the internal arrays from a block-level sparsity pattern (any matrix type
    // providing rows(), cols(), nonZeros() and an InnerIterator, e.g. SparseMatrix<bool>).
    // All stored blocks are zeroed; the actual values are set afterwards, for instance
    // through coeffRef(). See the sketch after this function.
    template<typename MatrixType>
    void setBlockStructure(const MatrixType& blockPattern)
    {
      resize(blockPattern.rows(), blockPattern.cols());
      reserve(blockPattern.nonZeros());

      // Browse the block pattern and set up the internal index arrays.
      m_outerIndex[0] = 0;
      if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Collect and sort the inner block indices of this outer block-vector.
        std::vector<int> nzBlockIdx;
        typename MatrixType::InnerIterator it(blockPattern, bj);
        for(; it; ++it)
        {
          nzBlockIdx.push_back(it.index());
        }
        std::sort(nzBlockIdx.begin(), nzBlockIdx.end());

        // Record the inner indices and, for variable-size blocks, the cumulative offset
        // of each block inside m_values.
        for(StorageIndex idx = 0; idx < static_cast<StorageIndex>(nzBlockIdx.size()); ++idx)
        {
          StorageIndex offset = m_outerIndex[bj]+idx; // position of the block
          m_indices[offset] = nzBlockIdx[idx];
          if(m_blockSize == Dynamic)
            m_blockPtr[offset+1] = m_blockPtr[offset] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
        }
        m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
      }
    }
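
    // Usage sketch for setBlockStructure() (illustrative only; the pattern and values shown
    // here are hypothetical): the pattern carries just the block-level sparsity, the blocks
    // themselves are written afterwards through coeffRef().
    //
    //   BlockSparseMatrix<double, 2> A(4, 4);      // 4x4 blocks, each of size 2x2
    //   SparseMatrix<bool> pattern(4, 4);
    //   pattern.insert(0, 0) = true;
    //   pattern.insert(2, 1) = true;
    //   pattern.makeCompressed();
    //   A.setBlockStructure(pattern);              // allocates and zeroes the two blocks
    //   A.coeffRef(0, 0) = Matrix2d::Identity();   // fill one 2x2 block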

    // Set the number of block-rows and block-columns (the scalar sizes follow from the
    // block size or the block layout).
    inline void resize(Index brow, Index bcol)
    {
      m_innerBSize = IsColMajor ? brow : bcol;
      m_outerBSize = IsColMajor ? bcol : brow;
    }

    // Set the common size of the blocks when it is not fixed at compile time.
    inline void setBlockSize(Index blockSize)
    {
      m_blockSize = blockSize;
    }

    // Describe a variable block-size layout: rowBlocks[i] (resp. colBlocks[j]) is the number
    // of scalar rows (resp. columns) of the i-th block-row (resp. j-th block-column).
    // resize() must have been called first with the matching number of blocks.
    inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
    {
      const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
      const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
      eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
      eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
      m_outerBSize = outerBlocks.size();
      // Cumulative scalar offsets of the block boundaries.
      m_innerOffset = new StorageIndex[m_innerBSize+1];
      m_outerOffset = new StorageIndex[m_outerBSize+1];
      m_innerOffset[0] = 0;
      m_outerOffset[0] = 0;
      std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
      std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);

      // Worst-case (dense) number of stored scalars; used to size m_values later on.
      m_nonzeros = 0;
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
        for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
          m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
    }
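
    // Illustrative sketch of a variable block-size layout (hypothetical values): the matrix
    // below has two block-rows/columns whose scalar sizes are 2 and 3.
    //
    //   BlockSparseMatrix<double> A(2, 2);        // Dynamic block size
    //   VectorXi rowBlocks(2), colBlocks(2);
    //   rowBlocks << 2, 3;
    //   colBlocks << 2, 3;
    //   A.setBlockLayout(rowBlocks, colBlocks);   // A is now 5x5 at the scalar level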

    // Allocate the internal arrays for the given number of nonzero blocks. resize() must
    // have been called, and for variable-size blocks the layout must be set beforehand.
    inline void reserve(const Index nonzerosblocks)
    {
      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
          "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");

      m_outerIndex = new StorageIndex[m_outerBSize+1];

      m_nonzerosblocks = nonzerosblocks;
      if(m_blockSize != Dynamic)
      {
        m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
        m_blockPtr = 0;
      }
      else
      {
        // Variable-size blocks: per-block offsets into m_values are needed.
        m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
      }
      m_indices = new StorageIndex[m_nonzerosblocks+1];
      m_values = new Scalar[m_nonzeros];
    }

    // Fill the matrix from a list of block triplets. Each triplet must provide row(), col()
    // and value(), where value() is the dense content of the block, stored in the same order
    // as BlockScalar; its rows()/cols() give the block sizes in the variable-size case.
    // Each block must appear only once (duplicates are not summed).
    template<typename InputIterator>
    void setFromTriplets(const InputIterator& begin, const InputIterator& end)
    {
      eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before");

      // Sort the triplets by outer index, then inner index.
      internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
      std::sort(begin, end, tripletcomp);

      // First pass: deduce the block layout (variable-size case) and count, per outer
      // block-vector, the number of nonzero blocks and of stored scalars.
      VectorXi rowBlocks(m_innerBSize);
      VectorXi colBlocks(m_outerBSize);
      rowBlocks.setZero(); colBlocks.setZero();
      VectorXi nzblock_outer(m_outerBSize);
      VectorXi nz_outer(m_outerBSize);
      nzblock_outer.setZero();
      nz_outer.setZero();
      for(InputIterator it(begin); it !=end; ++it)
      {
        eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
        eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
                     || (m_blockSize == Dynamic));

        if(m_blockSize == Dynamic)
        {
          eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
              "NON CORRESPONDING SIZES FOR ROW BLOCKS");
          eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
              "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
          rowBlocks[it->row()] = it->value().rows();
          colBlocks[it->col()] = it->value().cols();
        }
        nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
        nzblock_outer(IsColMajor ? it->col() : it->row())++;
      }

      // Allocate the internal arrays.
      if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
      StorageIndex nzblocks = nzblock_outer.sum();
      reserve(nzblocks);

      // Insertion cursor for each outer block-vector.
      VectorXi block_id(m_outerBSize);

      // Set up the outer index (and the outer boundaries of m_blockPtr).
      m_outerIndex[0] = 0;
      if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
        block_id(bj) = m_outerIndex[bj];
        if(m_blockSize == Dynamic)
        {
          m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
        }
      }

      // Second pass: record the inner block indices and copy the block values.
      for(InputIterator it(begin); it!=end; ++it)
      {
        StorageIndex outer = IsColMajor ? it->col() : it->row();
        StorageIndex inner = IsColMajor ? it->row() : it->col();
        m_indices[block_id(outer)] = inner;
        StorageIndex block_size = it->value().rows()*it->value().cols();
        StorageIndex nz_marker = blockPtr(block_id[outer]);
        memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
        if(m_blockSize == Dynamic)
        {
          m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size;
        }
        block_id(outer)++;
      }
    }
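
    // Assembly sketch from block triplets (illustrative; it assumes a triplet type whose
    // value() returns a dense Eigen matrix, so that rows(), cols() and data() are available
    // as required above, e.g. Eigen::Triplet<Matrix2d>):
    //
    //   typedef Eigen::Triplet<Matrix2d> BlockTriplet;
    //   std::vector<BlockTriplet> triplets;
    //   triplets.push_back(BlockTriplet(0, 0, Matrix2d::Identity()));
    //   triplets.push_back(BlockTriplet(1, 2, Matrix2d::Constant(0.5)));
    //
    //   BlockSparseMatrix<double, 2> A(3, 3);      // 3x3 blocks of size 2x2
    //   A.setFromTriplets(triplets.begin(), triplets.end());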

    // Number of rows at the scalar level.
    inline Index rows() const
    {
      return (IsColMajor ? innerSize() : outerSize());
    }

    // Number of columns at the scalar level.
    inline Index cols() const
    {
      return (IsColMajor ? outerSize() : innerSize());
    }

    inline Index innerSize() const
    {
      if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
      else return (m_innerBSize * m_blockSize);
    }

    inline Index outerSize() const
    {
      if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
      else return (m_outerBSize * m_blockSize);
    }

    // Number of block-rows.
    inline Index blockRows() const
    {
      return (IsColMajor ? m_innerBSize : m_outerBSize);
    }

    // Number of block-columns.
    inline Index blockCols() const
    {
      return (IsColMajor ? m_outerBSize : m_innerBSize);
    }

    inline Index outerBlocks() const { return m_outerBSize; }
    inline Index innerBlocks() const { return m_innerBSize; }

    // Map a scalar outer (resp. inner) index to the corresponding block index.
    inline Index outerToBlock(Index outer) const
    {
      eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");

      if(m_blockSize != Dynamic)
        return (outer / m_blockSize); // trivial for fixed-size blocks

      StorageIndex b_outer = 0;
      while(m_outerOffset[b_outer] <= outer) ++b_outer;
      return b_outer - 1;
    }

    inline Index innerToBlock(Index inner) const
    {
      eigen_assert(inner < innerSize() && "INNER INDEX OUT OF BOUNDS");

      if(m_blockSize != Dynamic)
        return (inner / m_blockSize); // trivial for fixed-size blocks

      StorageIndex b_inner = 0;
      while(m_innerOffset[b_inner] <= inner) ++b_inner;
      return b_inner - 1;
    }

    // Returns a writable reference to the dense block at block-row \a brow and block-column
    // \a bcol. The block must already be part of the stored structure: dynamic insertion of
    // new blocks is not supported.
    Ref<BlockScalar> coeffRef(Index brow, Index bcol)
    {
      eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
      eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");

      StorageIndex rsize = IsColMajor ? blockInnerSize(brow) : blockOuterSize(bcol);
      StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
      StorageIndex inner = IsColMajor ? brow : bcol;
      StorageIndex outer = IsColMajor ? bcol : brow;
      StorageIndex offset = m_outerIndex[outer];
      while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
        offset++;
      if(m_indices[offset] == inner)
      {
        return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
      }
      else
      {
        eigen_assert(false && "DYNAMIC INSERTION IS NOT YET SUPPORTED");
      }
    }

    // Returns a read-only view of the dense block at block-row \a brow and block-column
    // \a bcol. The block must be part of the stored structure.
    Map<const BlockScalar> coeff(Index brow, Index bcol) const
    {
      eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
      eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");

      StorageIndex rsize = IsColMajor ? blockInnerSize(brow) : blockOuterSize(bcol);
      StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
      StorageIndex inner = IsColMajor ? brow : bcol;
      StorageIndex outer = IsColMajor ? bcol : brow;
      StorageIndex offset = m_outerIndex[outer];
      while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
      if(m_indices[offset] == inner)
      {
        return Map<const BlockScalar> (&(m_values[blockPtr(offset)]), rsize, csize);
      }
      else
        eigen_assert(false && "NOT YET SUPPORTED");
    }

    // Returns the expression of the product with a dense vector or matrix.
    template<typename VecType>
    BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& lhs) const
    {
      return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, lhs);
    }
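
    // Product usage sketch (illustrative; it relies on the Eigen 3.2-era ProductBase
    // machinery used above):
    //
    //   BlockSparseMatrix<double, 2> A(3, 3);
    //   // ... fill A ...
    //   VectorXd x(A.cols()), y(A.rows());
    //   x.setOnes();
    //   y = A * x;   // evaluated through BlockSparseTimeDenseProduct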

    inline Index nonZerosBlocks() const { return m_nonzerosblocks; }

    inline Index nonZeros() const { return m_nonzeros; }

    // Reinterpret the flat value storage as an array of blocks (the blocks of a fixed-size
    // BlockScalar are laid out as contiguous scalars).
    inline BlockScalarReturnType *valuePtr() {return reinterpret_cast<BlockScalarReturnType *>(m_values);}

    inline StorageIndex *innerIndexPtr() {return m_indices; }
    inline const StorageIndex *innerIndexPtr() const {return m_indices; }
    inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
    inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }

    inline bool isCompressed() const {return true;}

    // Scalar index of the first row of block-row \a bi.
    inline Index blockRowsIndex(Index bi) const
    {
      return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
    }

    // Scalar index of the first column of block-column \a bj.
    inline Index blockColsIndex(Index bj) const
    {
      return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
    }

    inline Index blockOuterIndex(Index bj) const
    {
      return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
    }
    inline Index blockInnerIndex(Index bi) const
    {
      return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
    }

    inline Index blockInnerSize(Index bi) const
    {
      return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
    }
    inline Index blockOuterSize(Index bj) const
    {
      return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1] - m_outerOffset[bj]) : m_blockSize;
    }

    // Iterator over the scalar nonzeros of one outer (column/row) vector.
    class InnerIterator;

    // Iterator over the nonzero blocks of one outer block-vector.
    class BlockInnerIterator;

    friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
    {
      for (StorageIndex j = 0; j < m.outerBlocks(); ++j)
      {
        BlockInnerIterator itb(m, j);
        for(; itb; ++itb)
        {
          s << "(" << itb.row() << ", " << itb.col() << ")\n";
          s << itb.value() << "\n";
        }
      }
      s << std::endl;
      return s;
    }

    // Returns the starting offset of block \a id inside the values array m_values.
    Index blockPtr(Index id) const
    {
      if(m_blockSize == Dynamic) return m_blockPtr[id];
      else return id * m_blockSize * m_blockSize;
    }

  protected:
    // Declared but not defined: dynamic insertion of a new block is not supported yet.
    Map<BlockScalar> insert(Index brow, Index bcol);

    Index m_innerBSize;           // Number of block-rows (block-columns in row-major mode)
    Index m_outerBSize;           // Number of block-columns (block-rows in row-major mode)
    StorageIndex *m_innerOffset;  // First scalar index of each inner block (variable-size blocks only)
    StorageIndex *m_outerOffset;  // First scalar index of each outer block (variable-size blocks only)
    Index m_nonzerosblocks;       // Number of nonzero blocks
    Index m_nonzeros;             // Number of stored scalar coefficients
    Scalar *m_values;             // Dense content of the nonzero blocks, stored contiguously
    StorageIndex *m_blockPtr;     // Starting offset of each block in m_values (variable-size blocks only)
    StorageIndex *m_indices;      // Inner block index of each nonzero block
    StorageIndex *m_outerIndex;   // Position of the first nonzero block of each outer block-vector
    Index m_blockSize;            // Size of a block, or Dynamic for variable-size blocks
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
{
  public:
    enum {
      Flags = _Options
    };

    BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
      : m_mat(mat), m_outer(outer),
        m_id(mat.m_outerIndex[outer]),
        m_end(mat.m_outerIndex[outer+1])
    {
    }

    inline BlockInnerIterator& operator++() {m_id++; return *this; }

    inline const Map<const BlockScalar> value() const
    {
      return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
          rows(), cols());
    }
    inline Map<BlockScalar> valueRef()
    {
      return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
          rows(), cols());
    }

    inline Index index() const {return m_mat.m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const {return index(); }
    inline Index col() const {return outer(); }
    inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
    inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1] - m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize; }
    inline operator bool() const { return (m_id < m_end); }

  protected:
    const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
    const Index m_outer;
    Index m_id;
    Index m_end;
};
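
// Sketch of iterating over the stored blocks (illustrative): BlockInnerIterator visits the
// nonzero blocks of one outer block-vector, exactly as operator<< above does, while
// InnerIterator below exposes the usual scalar-level iteration.
//
//   BlockSparseMatrix<double, 2> A(3, 3);
//   // ... fill A ...
//   for (int bj = 0; bj < A.outerBlocks(); ++bj)
//     for (BlockSparseMatrix<double, 2>::BlockInnerIterator it(A, bj); it; ++it)
//       std::cout << "block(" << it.row() << "," << it.col() << ") =\n" << it.value() << "\n";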

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
{
  public:
    InnerIterator(const BlockSparseMatrix& mat, Index outer)
      : m_mat(mat), m_outer(outer), m_outerB(mat.outerToBlock(outer)),
        itb(mat, mat.outerToBlock(outer)),
        m_offset(outer - mat.blockOuterIndex(m_outerB))
    {
      if (itb)
      {
        m_id = m_mat.blockInnerIndex(itb.index());
        m_start = m_id;
        m_end = m_mat.blockInnerIndex(itb.index()+1);
      }
    }
    inline InnerIterator& operator++()
    {
      m_id++;
      if (m_id >= m_end)
      {
        // Done with the current block: jump to the next nonzero block of this outer vector.
        ++itb;
        if (itb)
        {
          m_id = m_mat.blockInnerIndex(itb.index());
          m_start = m_id;
          m_end = m_mat.blockInnerIndex(itb.index()+1);
        }
      }
      return *this;
    }
    inline const Scalar& value() const
    {
      return itb.value().coeff(m_id - m_start, m_offset);
    }
    inline Scalar& valueRef()
    {
      return itb.valueRef().coeffRef(m_id - m_start, m_offset);
    }
    inline Index index() const { return m_id; }
    inline Index outer() const {return m_outer; }
    inline Index col() const {return outer(); }
    inline Index row() const { return index(); }
    inline operator bool() const
    {
      return itb;
    }
  protected:
    const BlockSparseMatrix& m_mat;
    const Index m_outer;
    const Index m_outerB;
    BlockInnerIterator itb;
    const Index m_offset;
    Index m_start;
    Index m_id;
    Index m_end;
};

} // end namespace Eigen

#endif // EIGEN_SPARSEBLOCKMATRIX_H