// This file is part of the ACTS project.
//
// Copyright (C) 2016 CERN for the benefit of the ACTS project
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

#include "Acts/Definitions/TrackParametrization.hpp"
#include "Acts/Definitions/Units.hpp"
#include "Acts/MagneticField/BFieldMapUtils.hpp"
#include "Acts/MagneticField/InterpolatedBFieldMap.hpp"
#include "Acts/MagneticField/SolenoidBField.hpp"
#include "Acts/Utilities/VectorHelpers.hpp"
#include "ActsTests/CommonHelpers/BenchmarkTools.hpp"

#include <chrono>
#include <fstream>
#include <iostream>
#include <numbers>
#include <random>
#include <string>

using namespace Acts;
using namespace UnitLiterals;
using namespace ActsTests;

int main(int argc, char* argv[]) {
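  // Benchmark sizes, optionally overridden from the command line:
  //   argv[1]: iterations per run for the interpolated field map benchmarks
  //   argv[2]: iterations per run for the solenoid benchmark
  //   argv[3]: number of runs for the solenoid benchmark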
  std::size_t iters_map = 500;
  std::size_t iters_solenoid = 3;
  std::size_t runs_solenoid = 1000;
  if (argc >= 2) {
    iters_map = std::stoi(argv[1]);
  }
  if (argc >= 3) {
    iters_solenoid = std::stoi(argv[2]);
  }
  if (argc >= 4) {
    runs_solenoid = std::stoi(argv[3]);
  }

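  // Solenoid geometry (coil length, mean radius, number of windings) and
  // central field strength used for the analytical solenoid field, plus the
  // (r, z) binning of the interpolated field map built from it.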
  const double L = 5.8_m;
  const double R = (2.56 + 2.46) * 0.5 * 0.5_m;
  const std::size_t nCoils = 1154;
  const double bMagCenter = 2_T;
  const std::size_t nBinsR = 150;
  const std::size_t nBinsZ = 200;

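  // Extent of the interpolated map grid: |z| up to the full solenoid length
  // and r up to twice the solenoid radius; rMin is slightly negative,
  // presumably so that r = 0 lies strictly inside the grid.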
  double rMin = -0.1;
  double rMax = R * 2.;
  double zMin = 2 * (-L / 2.);
  double zMax = 2 * (L / 2.);

  SolenoidBField bSolenoidField({R, L, nCoils, bMagCenter});
  std::cout << "Building interpolated field map" << std::endl;
  auto bFieldMap = solenoidFieldMap({rMin, rMax}, {zMin, zMax},
                                    {nBinsR, nBinsZ}, bSolenoidField);
  MagneticFieldContext mctx{};

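  // Generator for random field lookup positions, uniform in each of r, phi
  // and z, covering 1.5 times the solenoid extent in both r and z.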
  std::minstd_rand rng;
  std::uniform_real_distribution<double> zDist(1.5 * (-L / 2.), 1.5 * L / 2.);
  std::uniform_real_distribution<double> rDist(0, R * 1.5);
  std::uniform_real_distribution<double> phiDist(-std::numbers::pi,
                                                 std::numbers::pi);
  auto genPos = [&]() -> Vector3 {
    const double z = zDist(rng), r = rDist(rng), phi = phiDist(rng);
    return {r * std::cos(phi), r * std::sin(phi), z};
  };

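  // Each benchmark result is also written as a CSV row for offline analysis;
  // the factor of 1.96 scales the timing errors, presumably to quote
  // approximate 95% confidence intervals (1.96 sigma for a normal
  // distribution).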
  std::ofstream os{"bfield_bench.csv"};

  auto csv = [&](const std::string& name, auto res) {
    os << name << "," << res.run_timings.size() << "," << res.iters_per_run
       << "," << res.totalTime().count() << "," << res.runTimeMedian().count()
       << "," << 1.96 * res.runTimeError().count() << ","
       << res.iterTimeAverage().count() << ","
       << 1.96 * res.iterTimeError().count();

    os << std::endl;
  };

  os << "name,runs,iters,total_time,run_time_median,run_time_error,iter_"
        "time_average,iter_time_error"
     << std::endl;

  // SolenoidBField lookup is so slow that the cost of generating a random field
  // lookup position is negligible in comparison...
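  // Time iters_solenoid lookups per run, over runs_solenoid independent runs.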
  std::cout << "Benchmarking random SolenoidBField lookup: " << std::flush;
  const auto solenoid_result =
      microBenchmark([&] { return bSolenoidField.getField(genPos()); },
                     iters_solenoid, runs_solenoid);
  std::cout << solenoid_result << std::endl;
  csv("solenoid", solenoid_result);

  // ...but for the interpolated B-field map, the cost of a field lookup is
  // comparable to that of generating a random position, so we must be more
  // careful. Hence we run two microbenchmarks which bracket the field lookup
  // cost with a lower and an upper bound.
  //
  // - The first benchmark operates at a constant position, so it measures only
  //   the field lookup overhead but has unrealistically good cache locality.
  //   In that sense, it provides a lower bound on the field lookup cost.
  std::cout << "Benchmarking interpolated field lookup: " << std::flush;
  const auto fixedPos = genPos();
  const auto map_fixed_nocache_result =
      microBenchmark([&] { return bFieldMap.getField(fixedPos); }, iters_map);
  std::cout << map_fixed_nocache_result << std::endl;
  csv("interp_nocache_fixed", map_fixed_nocache_result);

  std::cout << "Benchmarking random interpolated field lookup: " << std::flush;

  // - The second benchmark generates random positions, so it is biased by the
  //   cost of random position generation and has unrealistically bad cache
  //   locality, but it provides an upper bound on the field lookup cost.
  const auto map_rand_result =
      microBenchmark([&] { return bFieldMap.getField(genPos()); }, iters_map);
  std::cout << map_rand_result << std::endl;
  csv("interp_nocache_random", map_rand_result);

  // - This variation of the first benchmark uses a fixed position again, but
  //   uses the cache infrastructure to evaluate how much of an impact it has on
  //   performance in this scenario. We expect this to improve performance as
  //   the cache will always be valid for the fixed point.
  {
    std::cout << "Benchmarking cached interpolated field lookup: "
              << std::flush;
    auto cache = bFieldMap.makeCache(mctx);
    const auto map_cached_result_cache = microBenchmark(
        [&] { return bFieldMap.getField(fixedPos, cache).value(); }, iters_map);
    std::cout << map_cached_result_cache << std::endl;
    csv("interp_cache_fixed", map_cached_result_cache);
  }

  // - This variation of the second benchmark again generates random positions
  //   and uses the cache infrastructure to evaluate the impact on performance.
  //   We expect this to deteriorate performance, as the cache will most likely
  //   be invalid and need to be recreated, on top of the underlying lookup.
  {
    std::cout << "Benchmarking cached random interpolated field lookup: "
              << std::flush;
    auto cache2 = bFieldMap.makeCache(mctx);
    const auto map_rand_result_cache = microBenchmark(
        [&] { return bFieldMap.getField(genPos(), cache2).value(); },
        iters_map);
    std::cout << map_rand_result_cache << std::endl;
    csv("interp_cache_random", map_rand_result_cache);
  }

  // - The third benchmark tests a more 'realistic' access pattern than fixed
  //   or random positions: it advances along a straight line, which is close to
  //   the slightly curved trajectories that occur in particle propagation. This
  //   instance does not use the cache infrastructure, so it is effectively
  //   close to the random-position benchmark, although the positions are not
  //   truly random.
  {
    std::cout << "Benchmarking advancing interpolated field lookup: "
              << std::flush;
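    // Start from the origin and advance along a random direction with a fixed
    // step size h (given in native length units).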
    Vector3 pos{0, 0, 0};
    Vector3 dir{};
    dir.setRandom();
    double h = 1e-3;
    std::vector<Vector3> steps;
    steps.reserve(iters_map);
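    // Pre-generate the positions along the line, stopping once the track
    // leaves the field map volume, so that the benchmarked lambdas below time
    // only the field lookups themselves.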
    for (std::size_t i = 0; i < iters_map; i++) {
      pos += dir * h;
      double z = pos[eFreePos2];
      if (VectorHelpers::perp(pos) > rMax || z >= zMax || z < zMin) {
        break;
      }
      steps.push_back(pos);
    }
    const auto map_adv_result = microBenchmark(
        [&](const auto& s) { return bFieldMap.getField(s); }, steps);
    std::cout << map_adv_result << std::endl;
    csv("interp_nocache_adv", map_adv_result);

    // - This variation of the third benchmark advances along the same straight
    //   line, but also uses the cache infrastructure. As subsequent positions
    //   are close to one another, the cache will remain valid for a number of
    //   points before becoming invalid. We therefore expect performance to
    //   improve over the uncached straight-line advance.

    std::cout << "Benchmarking cached advancing interpolated field lookup: "
              << std::flush;
    auto cache = bFieldMap.makeCache(mctx);
    const auto map_adv_result_cache = microBenchmark(
        [&](const auto& s) { return bFieldMap.getField(s, cache).value(); },
        steps);
    std::cout << map_adv_result_cache << std::endl;
    csv("interp_cache_adv", map_adv_result_cache);
  }
}