//----------------------------------*-C++-*----------------------------------//
// Copyright 2020-2024 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file celeritas/phys/PhysicsData.hh
//---------------------------------------------------------------------------//
#pragma once

#include "corecel/cont/Array.hh"
#include "corecel/data/Collection.hh"
#include "corecel/data/CollectionBuilder.hh"
#include "corecel/data/StackAllocatorData.hh"
#include "celeritas/Quantities.hh"
#include "celeritas/Types.hh"
#include "celeritas/em/data/AtomicRelaxationData.hh"
#include "celeritas/em/data/EPlusGGData.hh"
#include "celeritas/em/data/LivermorePEData.hh"
#include "celeritas/grid/ValueGridType.hh"
#include "celeritas/grid/XsGridData.hh"
#include "celeritas/neutron/data/NeutronElasticData.hh"

#include "Interaction.hh"
#include "Secondary.hh"

namespace celeritas
{
//---------------------------------------------------------------------------//
// TYPES
//---------------------------------------------------------------------------//
//! Currently all value grids are cross section grids
using ValueGrid = XsGridData;
using ValueGridId = OpaqueId<XsGridData>;
using ValueTableId = OpaqueId<struct ValueTable>;

//---------------------------------------------------------------------------//
// PARAMS
//---------------------------------------------------------------------------//
/*!
 * Set of value grids for all elements or materials.
 *
 * It is allowable for this to be "false" (i.e., no materials assigned),
 * indicating that the value table doesn't apply in this context -- for
 * example, an empty ValueTable macro_xs means that the process doesn't have a
 * discrete interaction.
 */
struct ValueTable
{
    ItemRange<ValueGridId> grids;  //!< Value grid by element or material index

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const { return !grids.empty(); }
};

//---------------------------------------------------------------------------//
/*!
 * Set of cross section CDF tables for a model.
 *
 * Each material has a set of value grids for its constituent elements; these
 * are used to sample an element from a material when required by a discrete
 * interaction. A null ValueTableId means the material only has a single
 * element, so no cross sections need to be stored. An empty ModelXsTable means
 * no element selection is required for the model.
 */
struct ModelXsTable
{
    ItemRange<ValueTableId> material;  //!< Value table by material index

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const { return !material.empty(); }
};

//---------------------------------------------------------------------------//
/*!
 * Energy-dependent model IDs for a single process and particle type.
 *
 * For a given particle type, a single process should be divided into multiple
 * models as a function of energy. The ModelGroup represents this with an
 * energy grid, with each cell of the grid corresponding to a particular
 * ModelId.
 */
struct ModelGroup
{
    using Energy = units::MevEnergy;

    ItemRange<real_type> energy;  //!< Energy grid bounds [MeV]
    ItemRange<ParticleModelId> model;  //!< Corresponding models

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const
    {
        return (energy.size() >= 2) && (model.size() + 1 == energy.size());
    }
};

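// Illustrative sketch (not part of the original header): the invariant checked
// in ModelGroup::operator bool means the energy grid always has one more bound
// than there are models. With hypothetical values,
//
//   energy bounds [MeV]: {1e-4, 1e-1, 1e2}          -> energy.size() == 3
//   models:              {ParticleModelId{0}, {1}}  -> model.size()  == 2
//
// a 0.01 MeV track falls in the first cell and is handled by the first model,
// while a 10 MeV track falls in the second cell and uses the second model.
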
//---------------------------------------------------------------------------//
/*!
 * Particle-process that uses MC integration to sample interaction length.
 *
 * This is needed by the integral approach to correctly sample the discrete
 * interaction length after a particle loses energy along a step. An \c
 * IntegralXsProcess is stored for each particle-process. This will be "false"
 * (i.e. no energy_max assigned) if the particle associated with the process
 * does not have energy loss processes or if \c use_integral_xs is false.
 */
struct IntegralXsProcess
{
    ItemRange<real_type> energy_max_xs;  //!< Energy of the largest xs [mat]

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const
    {
        return !energy_max_xs.empty();
    }
};

//---------------------------------------------------------------------------//
/*!
 * Processes for a single particle type.
 *
 * Each index should be accessed with type ParticleProcessId. The "tables" are
 * a fixed-size array of ItemRange references to ValueTables. The first index
 * of the table (hard-coded) corresponds to ValueGridType; the second index is
 * a ParticleProcessId. So the cross sections for ParticleProcessId{2} would be
 * \code tables[ValueGridType::macro_xs][2] \endcode. This awkward access is
 * encapsulated by the PhysicsTrackView. \c integral_xs will only be assigned
 * if the integral approach is used and the particle has continuous-discrete
 * processes.
 */
struct ProcessGroup
{
    ItemRange<ProcessId> processes;  //!< Processes that apply [ppid]
    ValueGridArray<ItemRange<ValueTable>> tables;  //!< [vgt][ppid]
    ItemRange<IntegralXsProcess> integral_xs;  //!< [ppid]
    ItemRange<ModelGroup> models;  //!< Model applicability [ppid]
    ParticleProcessId eloss_ppid{};  //!< Process with de/dx and range tables
    bool has_at_rest{};  //!< Whether the particle type has an at-rest process

    //! True if assigned and valid
    explicit CELER_FUNCTION operator bool() const
    {
        return !processes.empty() && models.size() == processes.size();
    }

    //! Number of processes that apply
    CELER_FUNCTION ParticleProcessId::size_type size() const
    {
        return processes.size();
    }
};

//---------------------------------------------------------------------------//
/*!
 * Model data for special hardwired cases (on-the-fly xs calculations).
 *
 * TODO: the livermore/relaxation data are owned by other classes, but because
 * we assign <host, value> -> { <host, cref> ; <device, value> ->
 * <device, cref> }, copies of that data end up being stored here as well.
 */
template<Ownership W, MemSpace M>
struct HardwiredModels
{
    //// DATA ////

    // Photoelectric effect
    ProcessId photoelectric;
    units::MevEnergy photoelectric_table_thresh;
    ModelId livermore_pe;
    LivermorePEData<W, M> livermore_pe_data;
    AtomicRelaxParamsData<W, M> relaxation_data;

    // Positron annihilation
    ProcessId positron_annihilation;
    ModelId eplusgg;
    EPlusGGData eplusgg_data;

    // Neutron elastic
    ProcessId neutron_elastic;
    ModelId chips;
    NeutronElasticData<W, M> chips_data;

    //// MEMBER FUNCTIONS ////

    //! Assign from another set of hardwired models
    template<Ownership W2, MemSpace M2>
    HardwiredModels& operator=(HardwiredModels<W2, M2> const& other)
    {
        // Note: don't require the other set of hardwired models to be assigned
        photoelectric = other.photoelectric;
        if (photoelectric)
        {
            // Only assign photoelectric data if that process is present
            photoelectric_table_thresh = other.photoelectric_table_thresh;
            livermore_pe = other.livermore_pe;
            livermore_pe_data = other.livermore_pe_data;
        }
        relaxation_data = other.relaxation_data;
        positron_annihilation = other.positron_annihilation;
        eplusgg = other.eplusgg;
        eplusgg_data = other.eplusgg_data;

        neutron_elastic = other.neutron_elastic;
        if (neutron_elastic)
        {
            // Only assign neutron_elastic data if that process is present
            chips = other.chips;
            chips_data = other.chips_data;
        }

        return *this;
    }
};

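// Illustrative sketch of the ownership chain referenced in the TODO above.
// The assignments are assumed usage (the actual copies are orchestrated by
// higher-level Celeritas code), but they follow the notation in that comment:
//
//   HardwiredModels<Ownership::value, MemSpace::host> host_val;    // owning
//   HardwiredModels<Ownership::const_reference, MemSpace::host> host_ref;
//   host_ref = host_val;                                  // <host, cref>
//   HardwiredModels<Ownership::value, MemSpace::device> device_val;
//   device_val = host_val;                                // <device, value>
//   HardwiredModels<Ownership::const_reference, MemSpace::device> device_ref;
//   device_ref = device_val;                              // <device, cref>
//
// Because the "value" instantiations own their collections, the Livermore and
// relaxation data end up duplicated here in addition to their owning classes.
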
//---------------------------------------------------------------------------//
/*!
 * Scalar (no template needed) quantities used by physics.
 *
 * The user-configurable constants and multiple scattering options are
 * described in \c PhysicsParams .
 *
 * The \c model_to_action value corresponds to the \c ActionId for the first
 * \c ModelId . Additionally it implies (by construction in \c PhysicsParams )
 * the action IDs of several other physics actions.
 */
struct PhysicsParamsScalars
{
    using Energy = units::MevEnergy;

    //! Highest number of processes for any particle type
    ProcessId::size_type max_particle_processes{};
    //! Offset to create an ActionId from a ModelId
    ActionId::size_type model_to_action{};
    //! Number of physics models
    ModelId::size_type num_models{};

    // User-configurable constants
    real_type min_range{};  //!< rho [len]
    real_type max_step_over_range{};  //!< alpha [unitless]
    real_type min_eprime_over_e{};  //!< xi [unitless]
    Energy lowest_electron_energy{};  //!< Lowest e-/e+ kinetic energy
    real_type linear_loss_limit{};  //!< For scaled range calculation
    real_type fixed_step_limiter{};  //!< Global charged step size limit [len]

    // User-configurable multiple scattering options
    real_type lambda_limit{};  //!< lambda limit
    real_type range_factor{};  //!< range factor for e-/e+ (0.2 for muon/h)
    real_type safety_factor{};  //!< safety factor
    MscStepLimitAlgorithm step_limit_algorithm{MscStepLimitAlgorithm::size_};

    real_type secondary_stack_factor = 3;  //!< Secondary storage per state
                                           //!< size

    // When fixed step limiter is used, this is the corresponding action ID
    ActionId fixed_step_action{};

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const
    {
        return max_particle_processes > 0 && model_to_action >= 4
               && num_models > 0 && min_range > 0 && max_step_over_range > 0
               && min_eprime_over_e > 0
               && lowest_electron_energy > zero_quantity()
               && linear_loss_limit > 0 && secondary_stack_factor > 0
               && ((fixed_step_limiter > 0)
                   == static_cast<bool>(fixed_step_action))
               && lambda_limit > 0 && range_factor > 0 && range_factor < 1
               && safety_factor >= 0.1
               && step_limit_algorithm != MscStepLimitAlgorithm::size_;
    }

    //! Stop early due to MSC limitation
    CELER_FORCEINLINE_FUNCTION ActionId msc_action() const
    {
        return ActionId{model_to_action - 4};
    }

    //! Stop early due to range limitation
    CELER_FORCEINLINE_FUNCTION ActionId range_action() const
    {
        return ActionId{model_to_action - 3};
    }

    //! Undergo a discrete interaction
    CELER_FORCEINLINE_FUNCTION ActionId discrete_action() const
    {
        return ActionId{model_to_action - 2};
    }

    //! Indicate a discrete interaction was rejected by the integral method
    CELER_FORCEINLINE_FUNCTION ActionId integral_rejection_action() const
    {
        return ActionId{model_to_action - 1};
    }

    //! Indicate an interaction failed to allocate memory
    CELER_FORCEINLINE_FUNCTION ActionId failure_action() const
    {
        return ActionId{model_to_action + num_models};
    }
};

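// Illustrative sketch (not part of the original header): the accessors above
// imply a contiguous block of physics-related action IDs laid out around
// model_to_action. With hypothetical values model_to_action == 6 and
// num_models == 3:
//
//   ActionId{2}: msc_action()                 == model_to_action - 4
//   ActionId{3}: range_action()               == model_to_action - 3
//   ActionId{4}: discrete_action()            == model_to_action - 2
//   ActionId{5}: integral_rejection_action()  == model_to_action - 1
//   ActionId{6..8}: one action per model      == model_to_action + model index
//   ActionId{9}: failure_action()             == model_to_action + num_models
//
// This layout is why operator bool above requires model_to_action >= 4.
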
//---------------------------------------------------------------------------//
/*!
 * Persistent shared physics data.
 *
 * This includes macroscopic cross section, energy loss, and range tables
 * ordered by [particle][process][material][energy].
 *
 * So the first applicable process (ProcessId{0}) for an arbitrary particle
 * (ParticleId{1}) in material 2 (MaterialId{2}) will have the following
 * ID and cross section grid: \code
   ProcessId proc_id = params.particle[1].processes[0];
   const UniformGridData& grid
       = params.particle[1].table[int(ValueGridType::macro_xs)][0]
             .material[2].log_energy;
 * \endcode
 */
template<Ownership W, MemSpace M>
struct PhysicsParamsData
{
    //// TYPES ////

    template<class T>
    using Items = Collection<T, W, M>;
    template<class T>
    using ParticleItems = Collection<T, W, M, ParticleId>;
    template<class T>
    using ParticleModelItems = Collection<T, W, M, ParticleModelId>;

    //// DATA ////

    // Backend storage
    Items<real_type> reals;
    Items<ParticleModelId> pmodel_ids;
    Items<ValueGrid> value_grids;
    Items<ValueGridId> value_grid_ids;
    Items<ProcessId> process_ids;
    Items<ValueTable> value_tables;
    Items<ValueTableId> value_table_ids;
    Items<IntegralXsProcess> integral_xs;
    Items<ModelGroup> model_groups;
    ParticleItems<ProcessGroup> process_groups;
    ParticleModelItems<ModelId> model_ids;
    ParticleModelItems<ModelXsTable> model_xs;

    // Special data
    HardwiredModels<W, M> hardwired;

    // Non-templated data
    PhysicsParamsScalars scalars;

    //// METHODS ////

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const
    {
        return !process_groups.empty() && !model_ids.empty() && scalars;
    }

    //! Assign from another set of data
    template<Ownership W2, MemSpace M2>
    PhysicsParamsData& operator=(PhysicsParamsData<W2, M2> const& other)
    {
        CELER_EXPECT(other);

        reals = other.reals;
        pmodel_ids = other.pmodel_ids;
        value_grids = other.value_grids;
        value_grid_ids = other.value_grid_ids;
        process_ids = other.process_ids;
        value_tables = other.value_tables;
        value_table_ids = other.value_table_ids;
        integral_xs = other.integral_xs;
        model_groups = other.model_groups;
        process_groups = other.process_groups;
        model_ids = other.model_ids;
        model_xs = other.model_xs;

        hardwired = other.hardwired;

        scalars = other.scalars;

        return *this;
    }
};

//---------------------------------------------------------------------------//
// STATE
//---------------------------------------------------------------------------//
/*!
 * Physics state data for a single track.
 *
 * State that's persistent across steps:
 * - Remaining number of mean free paths to the next discrete interaction
 *
 * State that is reset at every step:
 * - Current macroscopic cross section
 * - Within-step energy deposition
 * - Within-step energy loss range
 * - Secondaries emitted from an interaction
 * - Discrete process element selection
 */
struct PhysicsTrackState
{
    real_type interaction_mfp;  //!< Remaining MFP to interaction

    // TEMPORARY STATE
    real_type macro_xs;  //!< Total cross section for discrete interactions
    real_type energy_deposition;  //!< Local energy deposition in a step [MeV]
    real_type dedx_range;  //!< Local energy loss range [len]
    MscRange msc_range;  //!< Range properties for multiple scattering
    Span<Secondary> secondaries;  //!< Emitted secondaries
    ElementComponentId element;  //!< Element sampled for interaction
};

//---------------------------------------------------------------------------//
/*!
 * Initialize a physics track state.
 *
 * Currently no data is required at initialization -- it all must be evaluated
 * by the physics kernels themselves.
 */
struct PhysicsTrackInitializer
{
};

//---------------------------------------------------------------------------//
/*!
 * Dynamic physics (models, processes) state data.
 *
 * The "xs scratch space" is a 2D array of reals, indexed with
 * [track_id][el_component_id], where the fast-moving dimension has the
 * greatest number of element components of any material in the problem. This
 * can be used for the physics to calculate microscopic cross sections.
 */
template<Ownership W, MemSpace M>
struct PhysicsStateData
{
    //// TYPES ////

    template<class T>
    using StateItems = celeritas::StateCollection<T, W, M>;
    template<class T>
    using Items = celeritas::Collection<T, W, M>;

    //// DATA ////

    StateItems<PhysicsTrackState> state;  //!< Track state [track]
    StateItems<MscStep> msc_step;  //!< Internal MSC data [track]

    Items<real_type> per_process_xs;  //!< XS [track][particle process]

    AtomicRelaxStateData<W, M> relaxation;  //!< Scratch data
    StackAllocatorData<Secondary, W, M> secondaries;  //!< Secondary stack

    //// METHODS ////

    //! True if assigned
    explicit CELER_FUNCTION operator bool() const
    {
        return !state.empty() && secondaries;
    }

    //! State size
    CELER_FUNCTION size_type size() const { return state.size(); }

    //! Assign from another set of states
    template<Ownership W2, MemSpace M2>
    PhysicsStateData& operator=(PhysicsStateData<W2, M2>& other)
    {
        CELER_EXPECT(other);
        state = other.state;
        msc_step = other.msc_step;

        per_process_xs = other.per_process_xs;

        relaxation = other.relaxation;
        secondaries = other.secondaries;

        return *this;
    }
};

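// Note on indexing (an assumption, not stated explicitly in this header): the
// per_process_xs collection is sized as size * max_particle_processes (see the
// resize helper below), which suggests a track-major flattening of
// [track][particle process]:
//
//   flat_index = track_index * scalars.max_particle_processes
//                + particle_process_index;
//
// In practice this lookup is presumably performed through PhysicsTrackView
// rather than by hand.
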
//---------------------------------------------------------------------------//
/*!
 * Resize the state in host code.
 */
template<MemSpace M>
inline void resize(PhysicsStateData<Ownership::value, M>* state,
                   HostCRef<PhysicsParamsData> const& params,
                   size_type size)
{
    CELER_EXPECT(size > 0);
    CELER_EXPECT(params.scalars.max_particle_processes > 0);
    resize(&state->state, size);
    resize(&state->msc_step, size);
    resize(&state->per_process_xs,
           size * params.scalars.max_particle_processes);
    resize(&state->relaxation, params.hardwired.relaxation_data, size);
    resize(
        &state->secondaries,
        static_cast<size_type>(size * params.scalars.secondary_stack_factor));
}

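// Hypothetical usage sketch (the variable names and setup are illustrative,
// not from this header): resizing host-side physics state once parameter data
// has been built.
//
//   HostCRef<PhysicsParamsData> params_ref = /* obtained from PhysicsParams */;
//   PhysicsStateData<Ownership::value, MemSpace::host> state;
//   resize(&state, params_ref, num_track_slots);
//
// Afterward state.size() == num_track_slots, per_process_xs holds
// num_track_slots * max_particle_processes entries, and the secondary stack
// has room for roughly secondary_stack_factor secondaries per track.
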
//---------------------------------------------------------------------------//
}  // namespace celeritas