|
|
|||
File indexing completed on 2025-10-27 08:03:34
//
// ********************************************************************
// * License and Disclaimer                                           *
// *                                                                  *
// * The Geant4 software is copyright of the Copyright Holders of     *
// * the Geant4 Collaboration. It is provided under the terms and     *
// * conditions of the Geant4 Software License, included in the file  *
// * LICENSE and available at http://cern.ch/geant4/license . These   *
// * include a list of copyright holders.                             *
// *                                                                  *
// * Neither the authors of this software system, nor their employing *
// * institutes,nor the agencies providing financial support for this *
// * work make any representation or warranty, express or implied,    *
// * regarding this software system or assume any liability for its   *
// * use. Please see the license in the file LICENSE and URL above    *
// * for the full disclaimer and the limitation of liability.         *
// *                                                                  *
// * This code implementation is the result of the scientific and     *
// * technical work of the GEANT4 collaboration.                      *
// * By using, copying, modifying or distributing the software (or    *
// * any work based on the software) you agree to acknowledge its     *
// * use in resulting scientific publications, and indicate your      *
// * acceptance of all terms of the Geant4 Software license.          *
// ********************************************************************
//

// This header is only compiled into the build when ONNX-runtime support
// is enabled (USE_INFERENCE_ONNX); otherwise the class does not exist.
#ifdef USE_INFERENCE_ONNX
# ifndef PAR04ONNXINFERENCE_HH
# define PAR04ONNXINFERENCE_HH
# include "Par04InferenceInterface.hh" // for Par04InferenceInterface
# include "onnxruntime_c_api.h" // for OrtMemoryInfo
# include "onnxruntime_cxx_api.h" // for Env, Session, SessionO...

# include <G4String.hh> // for G4String
# include <G4Types.hh> // for G4int, G4double
# include <memory> // for unique_ptr
# include <vector> // for vector

/**
 * @brief Inference using the ONNX runtime.
 *
 * Creates an environment which manages an internal thread pool and creates an
 * inference session for the model saved as an ONNX file.
 * Runs the inference in the session using the input vector from Par04InferenceSetup.
 *
 **/

class Par04OnnxInference : public Par04InferenceInterface
{
  public:
    /// Constructs the ONNX environment and inference session.
    /// NOTE(review): the parameters are unnamed in this declaration, so their
    /// meaning cannot be read off here — presumably the first G4String is the
    /// path to the ONNX model file and the leading G4int values configure the
    /// session; confirm against the definition in the corresponding .cc file.
    /// @param[in] cuda_keys   CUDA execution-provider option names
    /// @param[in] cuda_values CUDA execution-provider option values (paired
    ///                        one-to-one with cuda_keys)
    Par04OnnxInference(G4String, G4int, G4int, G4int,
                       G4int, // For Execution Provider Runtime Flags (for now only CUDA)
                       std::vector<const char*>& cuda_keys, std::vector<const char*>& cuda_values,
                       G4String, G4String);

    /// Default constructor; members are left in their default-constructed state.
    Par04OnnxInference();

    /// Run inference
    /// @param[in] aGenVector Input latent space and conditions
    /// @param[out] aEnergies Model output = generated shower energies
    /// @param[in] aSize Size of the output
    /// NOTE(review): aGenVector is taken by value (a copy per call); switching
    /// to const& would also require changing the out-of-line definition.
    void RunInference(std::vector<float> aGenVector, std::vector<G4double>& aEnergies, int aSize);

  private:
    /// Pointer to the ONNX environment
    std::unique_ptr<Ort::Env> fEnv;
    /// Pointer to the ONNX inference session
    std::unique_ptr<Ort::Session> fSession;
    /// ONNX settings
    Ort::SessionOptions fSessionOptions;
    /// ONNX memory info (non-owning; lifetime managed by the ONNX runtime)
    const OrtMemoryInfo* fInfo;
    // NOTE(review): forward declaration of a nested type that is never
    // defined or used in this header — possibly dead; verify before removing.
    struct MemoryInfo;
    /// the input names represent the names given to the model
    /// when defining the model's architecture (if applicable)
    /// they can also be retrieved from model.summary()
    std::vector<const char*> fInames;
};

# endif /* PAR04ONNXINFERENCE_HH */
#endif
| [ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
|
This page was automatically generated by the 2.3.7 LXR engine. The LXR team |
|