#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

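/** \class SparseMatrix
  *
  * A versatile sparse matrix representation using the Compressed Column (or Row) Storage scheme:
  * the nonzero coefficients and their inner indices are stored contiguously in \c m_data, the
  * start of each inner vector is recorded in \c m_outerIndex, and, while the matrix is in the
  * "uncompressed" mode, the number of nonzeros actually used in each inner vector is tracked in
  * \c m_innerNonZeros. \p _Options selects ColMajor (the default) or RowMajor storage, and
  * \p _StorageIndex is the integer type used for the indices.
  */
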
namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _StorageIndex StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  enum {
    Flags = 0
  };
};

}

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef SparseCompressedBase<SparseMatrix> Base;
  using Base::convert_index;
  friend class SparseVector<_Scalar,0,_StorageIndex>;
  template<typename, typename, typename, typename, typename>
  friend struct internal::Assignment;
public:
  using Base::isCompressed;
  using Base::nonZeros;
  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
  using Base::operator+=;
  using Base::operator-=;

  typedef MappedSparseMatrix<Scalar,Flags> Map;
  typedef Diagonal<SparseMatrix> DiagonalReturnType;
  typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
  typedef typename Base::InnerIterator InnerIterator;
  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

  using Base::IsRowMajor;
  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
  enum {
    Options = _Options
  };

  typedef typename Base::IndexVector IndexVector;
  typedef typename Base::ScalarVector ScalarVector;
protected:
  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

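  // Raw storage of the compressed scheme:
  //  - m_outerIndex[j] gives the start of inner vector j inside m_data,
  //  - m_innerNonZeros (allocated only in the uncompressed mode) gives the number of
  //    nonzeros actually used in each inner vector,
  //  - m_data holds the values together with their inner indices.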
  Index m_outerSize;
  Index m_innerSize;
  StorageIndex* m_outerIndex;
  StorageIndex* m_innerNonZeros;
  Storage m_data;

public:

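  // Dimensions: rows()/cols() are the matrix dimensions, while innerSize()/outerSize()
  // are the dimensions along and across the storage order.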
  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }

  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

  inline Index innerSize() const { return m_innerSize; }

  inline Index outerSize() const { return m_outerSize; }

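  // Low-level access to the arrays of the compressed storage scheme
  // (values, inner indices, outer starts, and, in the uncompressed mode, per-vector counts).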
  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }

  inline Scalar* valuePtr() { return m_data.valuePtr(); }

  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }

  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }

  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }

  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }

  inline Storage& data() { return m_data; }

  inline const Storage& data() const { return m_data; }

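  /** \returns the value at position (\a row, \a col), or Scalar(0) if that coefficient is not
    * stored. The lookup is a search within the corresponding inner vector. */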
  inline Scalar coeff(Index row, Index col) const
  {
    eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;
    Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
    return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
  }

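  /** \returns a non-const reference to the coefficient at position (\a row, \a col); if the
    * coefficient is not stored yet, it is inserted as an explicit zero via insert(row,col). */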
  inline Scalar& coeffRef(Index row, Index col)
  {
    eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;

    Index start = m_outerIndex[outer];
    Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
    eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
    if(end<=start)
      return insert(row,col);
    const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
    if((p<end) && (m_data.index(p)==inner))
      return m_data.value(p);
    else
      return insert(row,col);
  }

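  /** Inserts a new (zero) coefficient at position (\a row, \a col) and returns a reference to it.
    * The coefficient must not already be stored; the definition at the end of this file dispatches
    * to the compressed or uncompressed insertion routine. */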
  Scalar& insert(Index row, Index col);

public:

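  /** Removes all nonzeros but keeps the current matrix dimensions. */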
  inline void setZero()
  {
    m_data.clear();
    memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    if(m_innerNonZeros)
      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
  }

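  /** Preallocates space for \a reserveSize nonzeros. Only meaningful in the compressed mode. */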
  inline void reserve(Index reserveSize)
  {
    eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
    m_data.reserve(reserveSize);
  }

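  /** Preallocates \a reserveSizes[j] extra nonzeros for each inner vector j, switching the matrix
    * to the uncompressed mode if needed. \a reserveSizes can be any object exposing
    * operator[](Index) and a value_type (e.g. a VectorXi). */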
#ifdef EIGEN_PARSED_BY_DOXYGEN
  template<class SizesType>
  inline void reserve(const SizesType& reserveSizes);
#else
  template<class SizesType>
  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500)
    typename
    #endif
    SizesType::value_type())
  {
    EIGEN_UNUSED_VARIABLE(enableif);
    reserveInnerVectors(reserveSizes);
  }
#endif
protected:
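  /** \internal Implementation of the per-inner-vector reserve(): makes room for reserveSizes[j]
    * extra entries in each inner vector. A compressed matrix is turned into the uncompressed mode
    * first; otherwise the existing data are shifted into a freshly sized buffer. */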
  template<class SizesType>
  inline void reserveInnerVectors(const SizesType& reserveSizes)
  {
    if(isCompressed())
    {
      Index totalReserveSize = 0;

      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if (!m_innerNonZeros) internal::throw_std_bad_alloc();

      StorageIndex* newOuterIndex = m_innerNonZeros;

      StorageIndex count = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        newOuterIndex[j] = count;
        count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
        totalReserveSize += reserveSizes[j];
      }
      m_data.reserve(totalReserveSize);
      StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
      for(Index j=m_outerSize-1; j>=0; --j)
      {
        StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
        for(Index i=innerNNZ-1; i>=0; --i)
        {
          m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
          m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
        }
        previousOuterIndex = m_outerIndex[j];
        m_outerIndex[j] = newOuterIndex[j];
        m_innerNonZeros[j] = innerNNZ;
      }
      if(m_outerSize>0)
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

      m_data.resize(m_outerIndex[m_outerSize]);
    }
    else
    {
      StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();

      StorageIndex count = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        newOuterIndex[j] = count;
        StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
        StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
        count += toReserve + m_innerNonZeros[j];
      }
      newOuterIndex[m_outerSize] = count;

      m_data.resize(count);
      for(Index j=m_outerSize-1; j>=0; --j)
      {
        Index offset = newOuterIndex[j] - m_outerIndex[j];
        if(offset>0)
        {
          StorageIndex innerNNZ = m_innerNonZeros[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
        }
      }

      std::swap(m_outerIndex, newOuterIndex);
      std::free(newOuterIndex);
    }
  }
public:

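  // Low-level API for ordered insertion, as enforced by the assertions below: call startVec(j)
  // once per inner vector in increasing order, append the coefficients of that vector with
  // increasing inner index via insertBack()/insertBackByOuterInner(), then call finalize().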
  inline Scalar& insertBack(Index row, Index col)
  {
    return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
  }

  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
  {
    eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
    eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
    Index p = m_outerIndex[outer+1];
    ++m_outerIndex[outer+1];
    m_data.append(Scalar(0), inner);
    return m_data.value(p);
  }

  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
  {
    Index p = m_outerIndex[outer+1];
    ++m_outerIndex[outer+1];
    m_data.append(Scalar(0), inner);
    return m_data.value(p);
  }

  inline void startVec(Index outer)
  {
    eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
    eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

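  /** Must be called once all coefficients have been pushed through the low-level API above:
    * fills the trailing outer-index entries so that the matrix ends up in a consistent
    * compressed state. */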
  inline void finalize()
  {
    if(isCompressed())
    {
      StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
      Index i = m_outerSize;
      while (i>=0 && m_outerIndex[i]==0)
        --i;
      ++i;
      while (i<=m_outerSize)
      {
        m_outerIndex[i] = size;
        ++i;
      }
    }
  }

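  // Fills the matrix from a list of (row, col, value) triplets; see the definitions and the
  // documentation at the end of this file. Duplicated entries are summed by default, or
  // combined with a user-supplied dup_func.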
  template<typename InputIterators>
  void setFromTriplets(const InputIterators& begin, const InputIterators& end);

  template<typename InputIterators,typename DupFunctor>
  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

  template<typename DupFunctor>
  void collapseDuplicates(DupFunctor dup_func = DupFunctor());

  Scalar& insertByOuterInner(Index j, Index i)
  {
    return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
  }

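  /** Turns the matrix into the compressed format: squeezes out the unused room between inner
    * vectors, frees m_innerNonZeros, and trims the storage to the exact number of nonzeros. */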
  void makeCompressed()
  {
    if(isCompressed())
      return;

    eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

    Index oldStart = m_outerIndex[1];
    m_outerIndex[1] = m_innerNonZeros[0];
    for(Index j=1; j<m_outerSize; ++j)
    {
      Index nextOldStart = m_outerIndex[j+1];
      Index offset = oldStart - m_outerIndex[j];
      if(offset>0)
      {
        for(Index k=0; k<m_innerNonZeros[j]; ++k)
        {
          m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
          m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
        }
      }
      m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
      oldStart = nextOldStart;
    }
    std::free(m_innerNonZeros);
    m_innerNonZeros = 0;
    m_data.resize(m_outerIndex[m_outerSize]);
    m_data.squeeze();
  }

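  /** Turns the matrix into the uncompressed mode by allocating and filling m_innerNonZeros. */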
  void uncompress()
  {
    if(m_innerNonZeros != 0)
      return;
    m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
    for (Index i = 0; i < m_outerSize; i++)
    {
      m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
    }
  }

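  // Suppresses all nonzeros which are "much smaller" than reference under the tolerance epsilon;
  // the generic overload below keeps only the entries for which the keep predicate returns true.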
  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
  {
    prune(default_prunning_func(reference,epsilon));
  }

  template<typename KeepFunc>
  void prune(const KeepFunc& keep = KeepFunc())
  {
    makeCompressed();

    StorageIndex k = 0;
    for(Index j=0; j<m_outerSize; ++j)
    {
      Index previousStart = m_outerIndex[j];
      m_outerIndex[j] = k;
      Index end = m_outerIndex[j+1];
      for(Index i=previousStart; i<end; ++i)
      {
        if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
        {
          m_data.value(k) = m_data.value(i);
          m_data.index(k) = m_data.index(i);
          ++k;
        }
      }
    }
    m_outerIndex[m_outerSize] = k;
    m_data.resize(k,0);
  }

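  /** Resizes the matrix to \a rows x \a cols while keeping the nonzeros that still fall inside
    * the new dimensions. Shrinking the inner dimension requires the uncompressed mode so that
    * out-of-range entries can be dropped per inner vector. */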
  void conservativeResize(Index rows, Index cols)
  {
    if (this->rows() == rows && this->cols() == cols) return;

    if(rows==0 || cols==0) return resize(rows,cols);

    Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
    Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
    StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

    if (m_innerNonZeros)
    {
      StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
      if (!newInnerNonZeros) internal::throw_std_bad_alloc();
      m_innerNonZeros = newInnerNonZeros;

      for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
        m_innerNonZeros[i] = 0;
    }
    else if (innerChange < 0)
    {
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
      if (!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
        m_innerNonZeros[i] = 0;
    }

    if (m_innerNonZeros && innerChange < 0)
    {
      for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
      {
        StorageIndex &n = m_innerNonZeros[i];
        StorageIndex start = m_outerIndex[i];
        while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
      }
    }

    m_innerSize = newInnerSize;

    if (outerChange == 0)
      return;

    StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
    if (!newOuterIndex) internal::throw_std_bad_alloc();
    m_outerIndex = newOuterIndex;
    if (outerChange > 0)
    {
      StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
      for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
        m_outerIndex[i] = lastIdx;
    }
    m_outerSize += outerChange;
  }

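  /** Resizes the matrix to \a rows x \a cols and sets it to zero; all existing nonzeros are
    * discarded. Use conservativeResize() to keep them. */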
  void resize(Index rows, Index cols)
  {
    const Index outerSize = IsRowMajor ? rows : cols;
    m_innerSize = IsRowMajor ? cols : rows;
    m_data.clear();
    if (m_outerSize != outerSize || m_outerSize==0)
    {
      std::free(m_outerIndex);
      m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
      if (!m_outerIndex) internal::throw_std_bad_alloc();

      m_outerSize = outerSize;
    }
    if(m_innerNonZeros)
    {
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
    memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
  }

  void resizeNonZeros(Index size)
  {
    m_data.resize(size);
  }

  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }

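  // Constructors: empty matrix, sized matrix, and conversions from sparse expressions,
  // selfadjoint views, return-by-value expressions and diagonal expressions.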
  inline SparseMatrix()
    : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    check_template_parameters();
    resize(0, 0);
  }

  inline SparseMatrix(Index rows, Index cols)
    : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    check_template_parameters();
    resize(rows, cols);
  }

  template<typename OtherDerived>
  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
    : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
    check_template_parameters();
    const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
    if (needToTranspose)
      *this = other.derived();
    else
    {
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      internal::call_assignment_no_alias(*this, other.derived());
    }
  }

  template<typename OtherDerived, unsigned int UpLo>
  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
    : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    check_template_parameters();
    Base::operator=(other);
  }

  inline SparseMatrix(const SparseMatrix& other)
    : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    check_template_parameters();
    *this = other.derived();
  }

  template<typename OtherDerived>
  SparseMatrix(const ReturnByValue<OtherDerived>& other)
    : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    check_template_parameters();
    initAssignment(other);
    other.evalTo(*this);
  }

  template<typename OtherDerived>
  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
    : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
  {
    check_template_parameters();
    *this = other.derived();
  }

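  /** Swaps the content of two sparse matrices of the same type in constant time. */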
  inline void swap(SparseMatrix& other)
  {
    std::swap(m_outerIndex, other.m_outerIndex);
    std::swap(m_innerSize, other.m_innerSize);
    std::swap(m_outerSize, other.m_outerSize);
    std::swap(m_innerNonZeros, other.m_innerNonZeros);
    m_data.swap(other.m_data);
  }

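  /** Sets *this to the identity matrix; the matrix must be square. */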
  inline void setIdentity()
  {
    eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
    this->m_data.resize(rows());
    Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
    Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
    Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
    std::free(m_innerNonZeros);
    m_innerNonZeros = 0;
  }

  inline SparseMatrix& operator=(const SparseMatrix& other)
  {
    if (other.isRValue())
    {
      swap(other.const_cast_derived());
    }
    else if(this!=&other)
    {
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      initAssignment(other);
      if(other.isCompressed())
      {
        internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
        m_data = other.m_data;
      }
      else
      {
        Base::operator=(other);
      }
    }
    return *this;
  }

#ifndef EIGEN_PARSED_BY_DOXYGEN
  template<typename OtherDerived>
  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
  { return Base::operator=(other.derived()); }

  template<typename Lhs, typename Rhs>
  inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif

  template<typename OtherDerived>
  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
  {
    EIGEN_DBG_SPARSE(
      s << "Nonzero entries:\n";
      if(m.isCompressed())
      {
        for (Index i=0; i<m.nonZeros(); ++i)
          s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      }
      else
      {
        for (Index i=0; i<m.outerSize(); ++i)
        {
          Index p = m.m_outerIndex[i];
          Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
          Index k=p;
          for (; k<pe; ++k) {
            s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
          }
          for (; k<m.m_outerIndex[i+1]; ++k) {
            s << "(_,_) ";
          }
        }
      }
      s << std::endl;
      s << std::endl;
      s << "Outer pointers:\n";
      for (Index i=0; i<m.outerSize(); ++i) {
        s << m.m_outerIndex[i] << " ";
      }
      s << " $" << std::endl;
      if(!m.isCompressed())
      {
        s << "Inner non zeros:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_innerNonZeros[i] << " ";
        }
        s << " $" << std::endl;
      }
      s << std::endl;
    );
    s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
    return s;
  }

  inline ~SparseMatrix()
  {
    std::free(m_outerIndex);
    std::free(m_innerNonZeros);
  }

  Scalar sum() const;

# ifdef EIGEN_SPARSEMATRIX_PLUGIN
#   include EIGEN_SPARSEMATRIX_PLUGIN
# endif

protected:

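  /** \internal Resizes *this to match \a other and switches back to the compressed mode. */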
  template<typename Other>
  void initAssignment(const Other& other)
  {
    resize(other.rows(), other.cols());
    if(m_innerNonZeros)
    {
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
  }

  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

  class SingletonVector
  {
    StorageIndex m_index;
    StorageIndex m_value;
  public:
    typedef StorageIndex value_type;
    SingletonVector(Index i, Index v)
      : m_index(convert_index(i)), m_value(convert_index(v))
    {}

    StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
  };

  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

public:

  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
  {
    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;

    eigen_assert(!isCompressed());
    eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

    Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }
protected:
  struct IndexPosPair {
    IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
    Index i;
    Index p;
  };

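  /** \internal Assigns \a diagXpr to the diagonal of *this through the functor \a assignFunc.
    * With a plain assignment the matrix is overwritten by a pure diagonal matrix; otherwise the
    * existing diagonal entries are updated in place and the missing ones are inserted,
    * reallocating the storage only once for all new entries. */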
  template<typename DiagXpr, typename Func>
  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
  {
    Index n = diagXpr.size();

    const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
    if(overwrite)
    {
      if((this->rows()!=n) || (this->cols()!=n))
        this->resize(n, n);
    }

    if(m_data.size()==0 || overwrite)
    {
      typedef Array<StorageIndex,Dynamic,1> ArrayXI;
      this->makeCompressed();
      this->resizeNonZeros(n);
      Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
      Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
      Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
      values.setZero();
      internal::call_assignment_no_alias(values, diagXpr, assignFunc);
    }
    else
    {
      bool isComp = isCompressed();
      internal::evaluator<DiagXpr> diaEval(diagXpr);
      std::vector<IndexPosPair> newEntries;

      for(Index i = 0; i<n; ++i)
      {
        internal::LowerBoundIndex lb = this->lower_bound(i,i);
        Index p = lb.value;
        if(lb.found)
        {
          assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
        }
        else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
        {
          m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
          m_innerNonZeros[i]++;
          m_data.value(p) = Scalar(0);
          m_data.index(p) = StorageIndex(i);
          assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
        }
        else
        {
          newEntries.push_back(IndexPosPair(i,p));
        }
      }

      Index n_entries = Index(newEntries.size());
      if(n_entries>0)
      {
        Storage newData(m_data.size()+n_entries);
        Index prev_p = 0;
        Index prev_i = 0;
        for(Index k=0; k<n_entries;++k)
        {
          Index i = newEntries[k].i;
          Index p = newEntries[k].p;
          internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
          internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
          for(Index j=prev_i;j<i;++j)
            m_outerIndex[j+1] += k;
          if(!isComp)
            m_innerNonZeros[i]++;
          prev_p = p;
          prev_i = i;
          newData.value(p+k) = Scalar(0);
          newData.index(p+k) = StorageIndex(i);
          assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
        }
        {
          internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
          internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
          for(Index j=prev_i+1;j<=m_outerSize;++j)
            m_outerIndex[j] += n_entries;
        }
        m_data.swap(newData);
      }
    }
  }

private:
  static void check_template_parameters()
  {
    EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
    EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
  }

  struct default_prunning_func {
    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
    inline bool operator() (const Index&, const Index&, const Scalar& value) const
    {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};

namespace internal {

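// Helper shared by the setFromTriplets() overloads: the triplets are counted and inserted into a
// temporary matrix with the opposite storage order, duplicates are collapsed with dup_func, and
// the final assignment to mat transposes the result back (which also sorts each inner vector).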
template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    trMat.collapseDuplicates(dup_func);
  }

  mat = trMat;
}

}

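/** Fills \c *this from the triplet list defined by the iterator range \a begin - \a end, summing
  * duplicated entries. The matrix must already be sized to its final dimensions and its previous
  * content is overwritten. Each iterator must point to an object providing row(), col() and
  * value(), such as Eigen::Triplet. A minimal usage sketch (assuming a 5x5 double matrix):
  *
  * \code
  * std::vector<Eigen::Triplet<double> > triplets;
  * triplets.push_back(Eigen::Triplet<double>(0, 1, 2.5));
  * triplets.push_back(Eigen::Triplet<double>(3, 4, -1.0));
  * Eigen::SparseMatrix<double> A(5, 5);
  * A.setFromTriplets(triplets.begin(), triplets.end());
  * \endcode
  */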
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

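/** Same as the setFromTriplets() overload above, except that duplicated entries are combined with
  * the user functor \a dup_func instead of being summed: dup_func(a, b) returns the value to keep
  * for two duplicates a and b. */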
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}

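/** \internal Merges the duplicated entries of an uncompressed matrix using \a dup_func and leaves
  * the matrix in the compressed mode. */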
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());

  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;

  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

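// Assignment from a generic sparse expression. When the storage orders differ, the expression is
// evaluated once and copied through a counting-sort style two-pass transposition; otherwise the
// assignment is forwarded to the base class.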
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif

    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;

    dest.m_data.resize(count);

    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }

    return Base::operator=(other.derived());
  }
}

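// Random insertion of a new (zero) coefficient. The matrix is switched to the uncompressed mode
// on the first call; the cheap append and in-vector-shift paths are handled here, and the general
// case is delegated to insertUncompressed() after reserving some room in every inner vector.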
template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  Index data_end = m_data.allocatedSize();

  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}

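// Insertion into an uncompressed matrix: grows the target inner vector if it is full, then shifts
// the larger inner indices one slot to the right to keep the vector sorted.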
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

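// Insertion into a compressed matrix: every insertion may shift all the entries stored after the
// insertion point, so this path is only efficient when coefficients arrive in increasing order.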
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];

  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());

      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;

      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

}

}

#endif