// This file is part of the ACTS project.
//
// Copyright (C) 2016 CERN for the benefit of the ACTS project
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

#pragma once

#include "Acts/Definitions/Algebra.hpp"
#include "Acts/Utilities/ArrayHelpers.hpp"
#include "Acts/Utilities/MathHelpers.hpp"

#include <bitset>
#include <cassert>
#include <cmath>
#include <limits>
#include <optional>

#include "Eigen/Dense"

namespace Acts {

/// Convert a bitset to a matrix of integers, with each element set to the bit
/// value.
/// @note How the bits are assigned to matrix elements depends on the storage
/// type of the matrix being converted (row-major or col-major)
/// @tparam MatrixType Matrix type that is produced
/// @param bs The bitset to convert
/// @return A matrix with the integer values of the bits from @p bs
template <typename MatrixType>
MatrixType bitsetToMatrix(const std::bitset<MatrixType::RowsAtCompileTime *
                                            MatrixType::ColsAtCompileTime>
                              bs) {
  constexpr int rows = MatrixType::RowsAtCompileTime;
  constexpr int cols = MatrixType::ColsAtCompileTime;

  static_assert(rows != -1 && cols != -1,
                "bitsetToMatrix does not support dynamic matrices");

  MatrixType m;
  auto* p = m.data();
  for (std::size_t i = 0; i < rows * cols; i++) {
    p[i] = bs[rows * cols - 1 - i];
  }
  return m;
}
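
// Illustrative usage sketch (not part of the ACTS API): with the default
// column-major storage of ActsMatrix, the bitset's string representation maps
// onto the matrix coefficients in storage order, i.e. the leftmost character
// ends up in m(0, 0).
inline void exampleBitsetToMatrix() {
  const std::bitset<4> bits("1101");
  const auto m = bitsetToMatrix<ActsMatrix<2, 2>>(bits);
  // Storage order for a column-major 2x2 matrix: m(0,0), m(1,0), m(0,1), m(1,1)
  assert(m(0, 0) == 1 && m(1, 0) == 1 && m(0, 1) == 0 && m(1, 1) == 1);
}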

/// Convert an integer matrix to a bitset.
/// @note How the bits are ordered depends on the storage type of the matrix
/// being converted (row-major or col-major)
/// @tparam Derived Eigen base concrete type
/// @param m Matrix that is converted
/// @return The converted bitset.
template <typename Derived>
auto matrixToBitset(const Eigen::PlainObjectBase<Derived>& m) {
  using MatrixType = Eigen::PlainObjectBase<Derived>;
  constexpr std::size_t rows = MatrixType::RowsAtCompileTime;
  constexpr std::size_t cols = MatrixType::ColsAtCompileTime;

  std::bitset<rows * cols> res;

  auto* p = m.data();
  for (std::size_t i = 0; i < rows * cols; i++) {
    res[rows * cols - 1 - i] = static_cast<bool>(p[i]);
  }

  return res;
}
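
// Illustrative usage sketch (not part of the ACTS API): for a 0/1-valued
// matrix, matrixToBitset inverts bitsetToMatrix, so a round trip reproduces
// the original bits.
inline void exampleMatrixToBitsetRoundTrip() {
  const std::bitset<6> bits("101001");
  const auto m = bitsetToMatrix<ActsMatrix<2, 3>>(bits);
  assert(matrixToBitset(m) == bits);
}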

/// @brief Perform a blocked matrix multiplication, avoiding Eigen GEMM
/// methods.
///
/// @tparam A The type of the first matrix, which should be MxN
/// @tparam B The type of the second matrix, which should be NxP
///
/// @param[in] a An MxN matrix of type A
/// @param[in] b An NxP matrix of type B
///
/// @returns The product ab
template <typename A, typename B>
inline ActsMatrix<A::RowsAtCompileTime, B::ColsAtCompileTime> blockedMult(
    const A& a, const B& b) {
  // Extract the sizes of the matrix types that we receive as template
  // parameters.
  constexpr int M = A::RowsAtCompileTime;
  constexpr int N = A::ColsAtCompileTime;
  constexpr int P = B::ColsAtCompileTime;

  // Ensure that the second dimension of our first matrix equals the first
  // dimension of the second matrix, otherwise we cannot multiply.
  static_assert(N == B::RowsAtCompileTime);

  if constexpr (M <= 4 && N <= 4 && P <= 4) {
    // In cases where the matrices being multiplied are small, we can rely on
    // Eigen to do a good job, and we don't really need to do any blocking.
    return a * b;
  } else {
    // Here, we want to calculate the expression C = AB. Eigen natively
    // doesn't do a great job at this if the matrices A and B are large
    // (roughly M >= 8, N >= 8, or P >= 8), and applies a slow GEMM operation.
    // We apply a blocked matrix multiplication operation to decompose the
    // multiplication into smaller operations, relying on the fact that:
    //
    // ┌         ┐   ┌         ┐ ┌         ┐
    // │ C₁₁ C₁₂ │ = │ A₁₁ A₁₂ │ │ B₁₁ B₁₂ │
    // │ C₂₁ C₂₂ │ = │ A₂₁ A₂₂ │ │ B₂₁ B₂₂ │
    // └         ┘   └         ┘ └         ┘
    //
    // where:
    //
    // C₁₁ = A₁₁ * B₁₁ + A₁₂ * B₂₁
    // C₁₂ = A₁₁ * B₁₂ + A₁₂ * B₂₂
    // C₂₁ = A₂₁ * B₁₁ + A₂₂ * B₂₁
    // C₂₂ = A₂₁ * B₁₂ + A₂₂ * B₂₂
    //
    // The sizes of these submatrices are roughly half (in each dimension) that
    // of the parent matrix. If the size of the parent matrix is even, we can
    // divide it exactly. If the size of the parent matrix is odd, then some
    // of the submatrices will be one larger than the others. In general, for
    // any matrix Q, the sizes of the submatrices are (where / denotes integer
    // division):
    //
    // Q₁₁ : M / 2 × P / 2
    // Q₁₂ : M / 2 × (P + 1) / 2
    // Q₂₁ : (M + 1) / 2 × P / 2
    // Q₂₂ : (M + 1) / 2 × (P + 1) / 2
    //
    // See https://csapp.cs.cmu.edu/public/waside/waside-blocking.pdf for a
    // more in-depth explanation of blocked matrix multiplication.
    constexpr int M1 = M / 2;
    constexpr int M2 = (M + 1) / 2;
    constexpr int N1 = N / 2;
    constexpr int N2 = (N + 1) / 2;
    constexpr int P1 = P / 2;
    constexpr int P2 = (P + 1) / 2;

    // Construct the end result in this matrix, which destroys a few of Eigen's
    // built-in optimization techniques, but sadly this is necessary.
    ActsMatrix<M, P> r;

    // C₁₁ = A₁₁ * B₁₁ + A₁₂ * B₂₁
    r.template topLeftCorner<M1, P1>().noalias() =
        a.template topLeftCorner<M1, N1>() *
            b.template topLeftCorner<N1, P1>() +
        a.template topRightCorner<M1, N2>() *
            b.template bottomLeftCorner<N2, P1>();

    // C₁₂ = A₁₁ * B₁₂ + A₁₂ * B₂₂
    r.template topRightCorner<M1, P2>().noalias() =
        a.template topLeftCorner<M1, N1>() *
            b.template topRightCorner<N1, P2>() +
        a.template topRightCorner<M1, N2>() *
            b.template bottomRightCorner<N2, P2>();

    // C₂₁ = A₂₁ * B₁₁ + A₂₂ * B₂₁
    r.template bottomLeftCorner<M2, P1>().noalias() =
        a.template bottomLeftCorner<M2, N1>() *
            b.template topLeftCorner<N1, P1>() +
        a.template bottomRightCorner<M2, N2>() *
            b.template bottomLeftCorner<N2, P1>();

    // C₂₂ = A₂₁ * B₁₂ + A₂₂ * B₂₂
    r.template bottomRightCorner<M2, P2>().noalias() =
        a.template bottomLeftCorner<M2, N1>() *
            b.template topRightCorner<N1, P2>() +
        a.template bottomRightCorner<M2, N2>() *
            b.template bottomRightCorner<N2, P2>();

    return r;
  }
}
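
// Illustrative usage sketch (not part of the ACTS API): blockedMult returns
// the same product as plain Eigen multiplication; only the evaluation strategy
// differs for larger fixed-size matrices.
inline void exampleBlockedMult() {
  const ActsMatrix<8, 6> a = ActsMatrix<8, 6>::Random();
  const ActsMatrix<6, 8> b = ActsMatrix<6, 8>::Random();
  const ActsMatrix<8, 8> c = blockedMult(a, b);
  assert(c.isApprox(a * b));
}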

/// FPE "safe" functions
///
/// Our main motivation for this is that users might have a strict FPE policy
/// which would flag every single occurrence as a failure, and then somebody
/// has to investigate. Since we are processing a high number of events, and
/// floating point numbers sometimes work in mysterious ways, the caller of
/// these functions might want to hide FPEs and handle them in a more
/// controlled way.

/// Calculate the inverse of an Eigen matrix after checking if it can be
/// numerically inverted. This makes it possible to catch potential FPEs before
/// they occur. For matrices up to 4x4, the inverse is computed directly. For
/// larger and dynamic matrices, FullPivLU is used.
///
/// @tparam MatrixType Eigen matrix concrete type
/// @tparam ResultType Eigen result type, defaulted to the input type
///
/// @param m Eigen matrix to invert
///
/// @return The inverse of @p m if it is numerically invertible, otherwise
/// std::nullopt
template <typename MatrixType, typename ResultType = MatrixType>
std::optional<ResultType> safeInverse(const MatrixType& m) noexcept {
  constexpr int rows = MatrixType::RowsAtCompileTime;
  constexpr int cols = MatrixType::ColsAtCompileTime;

  static_assert(rows == cols);

  ResultType result;
  bool invertible = false;

  if constexpr (rows > 4 || rows == -1) {
    Eigen::FullPivLU<MatrixType> mFullPivLU(m);
    if (mFullPivLU.isInvertible()) {
      invertible = true;
      result = mFullPivLU.inverse();
    }
  } else {
    m.computeInverseWithCheck(result, invertible);
  }

  if (invertible) {
    return result;
  }

  return std::nullopt;
}
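
// Illustrative usage sketch (not part of the ACTS API): safeInverse yields the
// inverse for an invertible matrix and std::nullopt for a singular one,
// instead of producing Inf/NaN coefficients or raising an FPE.
inline void exampleSafeInverse() {
  ActsMatrix<2, 2> regular;
  regular << 2., 0., 0., 4.;
  ActsMatrix<2, 2> singular;
  singular << 1., 2., 2., 4.;
  assert(safeInverse(regular).has_value());
  assert(!safeInverse(singular).has_value());
}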

/// Specialization of the exponent limit to be used for safe exponential,
/// depending on the floating point type.
/// See https://godbolt.org/z/z53Er6Mzf for the reasoning behind the concrete
/// numbers.
template <typename T>
struct ExpSafeLimit {};
template <>
struct ExpSafeLimit<double> {
  constexpr static double value = 500.0;
};
template <>
struct ExpSafeLimit<float> {
  constexpr static float value = 50.0;
};

/// Calculate the exponential function while avoiding FPEs.
///
/// @param val argument for which the exponential function should be evaluated.
///
/// @return 0 in the case of underflow, std::numeric_limits<T>::infinity in the
/// case of overflow, and std::exp(val) otherwise
template <typename T>
constexpr T safeExp(T val) noexcept {
  constexpr T maxExponent = ExpSafeLimit<T>::value;
  constexpr T minExponent = -maxExponent;
  if (val < minExponent) {
    return 0.0;
  }

  if (val > maxExponent) {
    return std::numeric_limits<T>::infinity();
  }

  return std::exp(val);
}
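
// Illustrative usage sketch (not part of the ACTS API): arguments beyond the
// safe limits saturate to 0 or infinity instead of triggering an FPE.
inline void exampleSafeExp() {
  assert(safeExp(0.) == 1.);
  assert(safeExp(-600.) == 0.);
  assert(safeExp(600.) == std::numeric_limits<double>::infinity());
}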

/// @brief Map the indices of the lower triangular part of a symmetric N x N
///        matrix to an unrolled vector index.
/// @tparam N Dimension of the symmetric matrix
/// @param i The row index of the symmetric matrix
/// @param k The column index of the symmetric matrix
/// @return The corresponding vector index in the unrolled storage
template <std::size_t N>
constexpr std::size_t vecIdxFromSymMat(const std::size_t i, const std::size_t k)
  requires(N > 0)
{
  assert(i < N);
  assert(k < N);
  if (k > i) {
    return vecIdxFromSymMat<N>(k, i);
  }
  return sumUpToN(i) + k;
}
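
// Illustrative sketch (not part of the ACTS API): for N = 3 the lower triangle
// is unrolled row by row,
//   (0,0) -> 0, (1,0) -> 1, (1,1) -> 2, (2,0) -> 3, (2,1) -> 4, (2,2) -> 5,
// and indices above the diagonal map to their mirrored lower-triangle entry.
static_assert(vecIdxFromSymMat<3>(0, 0) == 0);
static_assert(vecIdxFromSymMat<3>(2, 1) == 4);
static_assert(vecIdxFromSymMat<3>(1, 2) == 4);
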
/// @brief Map an unrolled vector index to the indices of the lower triangular
///        part of a symmetric N x N matrix. Inverse of `vecIdxFromSymMat`.
/// @tparam N Dimension of the symmetric matrix
/// @param k The unrolled vector index
/// @return A pair of indices (i, j) such that the element at (i, j) in the
///         symmetric matrix corresponds to the k-th element in the unrolled
///         vector.
template <std::size_t N>
constexpr std::array<std::size_t, 2> symMatIndices(const std::size_t k)
  requires(N > 1)
{
  assert(k < sumUpToN(N));
  constexpr std::size_t bound = sumUpToN(N - 1);
  if (k >= bound) {
    return std::array<std::size_t, 2>{N - 1, k - bound};
  }
  if constexpr (N > 2) {
    return symMatIndices<N - 1>(k);
  }
  return filledArray<std::size_t, 2>(0);
}
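
// Illustrative sketch (not part of the ACTS API): symMatIndices undoes
// vecIdxFromSymMat, e.g. for N = 3 the unrolled index 4 maps back to the
// lower-triangle element (2, 1).
static_assert(symMatIndices<3>(4) == std::array<std::size_t, 2>{2, 1});
static_assert(vecIdxFromSymMat<3>(symMatIndices<3>(5)[0],
                                  symMatIndices<3>(5)[1]) == 5);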

}  // namespace Acts