Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-09-18 09:32:33

0001 #ifndef TMVA_SOFIE_RMODEL
0002 #define TMVA_SOFIE_RMODEL
0003 
#include "TMVA/RModel_Base.hxx"
#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"

#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include <span>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>
#include <vector>
0007 
0008 namespace TMVA {
0009 namespace Experimental {
0010 namespace SOFIE {
0011 
0012 class RModel final : public RModel_Base {
0013 
0014 private:
0015    bool fIsInitialized = false;
0016    bool fIsSubGraph = false;
0017    int fVerbose = 0;
0018    int fBatchSize = -1;
0019    long fReadPos = 0;  // reading file position
0020 
0021    OptimizationLevel fOptimizationLevel = OptimizationLevel::kExtended;
0022 
0023    std::unordered_map<std::string, InputTensorInfo> fInputTensorInfos; // input tensors where shape may not fully defined or other graph inputs?
0024    std::unordered_map<std::string, TensorInfo> fReadyInputTensorInfos; // input tensors where shape is full defined
0025    std::unordered_map<std::string, InitializedTensor> fInitializedTensors;
0026    std::unordered_map<std::string, TensorInfo> fIntermediateTensorInfos;
0027    std::unordered_map<std::string, DynamicTensorInfo> fDynamicTensorInfos;
0028    std::unordered_map<std::string, std::string>
0029       fShapeParams; // parameters defining the dynamic shape (e.g. batch size), store also its default value
0030    std::vector<std::string> fOutputTensorNames;
0031    std::vector<std::string> fInputTensorNames; // input tensor names using ONNX order
0032 
0033    std::vector<std::unique_ptr<ROperator>> fOperators;
0034 
0035    std::vector<std::shared_ptr<RModel>> fSubGraphs;    ///<!  sub-graph models (transient)
0036    RModel * fParentGraph = nullptr;
0037 
0038    // memory pool information for intermediate tensors
0039    MemoryPoolInfo fIntermediateMemoryInfo;    ///<!  intermediate memory info (transient)
0040    std::unordered_map<std::string_view, size_t> fIntermediateTensorFrequencyLookup;    ///<!  lookup table for intermediate tensor frequency (transient)
0041 
0042 public:
0043    /**
0044        Default constructor. Needed to allow serialization of ROOT objects. See
0045        https://root.cern/manual/io_custom_classes/#restrictions-on-types-root-io-can-handle
0046    */
0047    RModel() = default;
0048    RModel(std::string name, std::string parsedtime) : RModel_Base(name, parsedtime) {}
0049 
0050    // For GNN Functions usage
0051    RModel(std::string function_name) : RModel_Base(function_name) {}
0052 
0053    int Verbose() const { return fVerbose;}
0054 
0055    const std::vector<size_t> &GetTensorShape(std::string name) const;
0056    std::vector<Dim> GetDynamicTensorShape(std::string name) const;
0057    const ETensorType &GetTensorType(std::string name) const;
0058 
0059    bool CheckIfTensorAlreadyExist(std::string tensor_name);
0060    void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape);
0061    void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape);
0062    void AddOperator(std::unique_ptr<ROperator> op, int order_execution = -1);
0063    void AddOperatorReference(ROperator *op, int order_execution = -1)
0064    {
0065       std::unique_ptr<ROperator> tmp(op);
0066       AddOperator(std::move(tmp), order_execution);
0067    }
0068    void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape,
0069                              std::shared_ptr<void> data);
0070    void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape,
0071                              std::shared_ptr<void> data);
0072 
0073    template<class T>
0074    void AddConstantTensor(const std::string & name, const std::vector<size_t> & shape, const T * data) {
0075       size_t length = ConvertShapeToLength(shape);
0076       std::shared_ptr<void> data_ptr(malloc(length * sizeof(T)), free);
0077       std::memcpy(data_ptr.get(), (void*) data, length * sizeof(T));
0078       AddConstantTensor(name, GetTemplatedType<T>(T()), shape, data_ptr);
0079    }
0080    // for boolean can be more convenient passing an std::vector
0081    template<class T>
0082    void AddConstantTensor(const std::string & name, const std::vector<size_t> & shape, const std::vector<T> & data) {
0083       size_t length = data.size();
0084       std::shared_ptr<void> data_ptr(malloc(length * sizeof(T)), free);
0085       std::copy(data.begin(), data.end(), (T*) data_ptr.get());
0086       //std::memcpy(data_ptr.get(), (void*) data, length * sizeof(T));
0087       AddConstantTensor(name, GetTemplatedType<T>(T()), shape, data_ptr);
0088    }
0089 
0090    template <typename T>
0091    void AddInitializedTensor(const std::string & tensor_name, const std::vector<std::size_t> & shape, T *raw_data)
0092    {
0093       size_t size = ConvertShapeToLength(shape);
0094       std::shared_ptr<void> data(malloc(size * sizeof(T)), free);
0095       std::memcpy(data.get(), raw_data, size * sizeof(T));
0096       AddInitializedTensor(tensor_name,  GetTemplatedType(T()), shape, data);
0097    }
0098 
0099    // add and initialize subgraph to the model
0100    void InitializeSubGraph(std::shared_ptr<RModel>  graph);
0101 
0102    // set a flag to indicate tensor does not need to be written in a weight file
0103    // (e.g. shape tensors used as input to define a shape (in Reshape))
0104    void SetNotWritableInitializedTensor(const std::string & tensor_name);
0105 
0106    // Check if a tensor is initialized
0107    bool IsInitializedTensor(const std::string &name) const;
0108    // Check if a tensor is Constant (note a Constant tensor is also initialized)
0109    bool IsConstantTensor(const std::string &name) const;
0110    bool IsDynamicTensor(const std::string &name) const;
0111    // Check if tensor is a input dynamic tensor (without a specified shape, based on Sim structure
0112    bool IsDimInputTensor(const std::string &name) const;
0113    // check if tensor is a fully specified input tensor
0114    bool IsReadyInputTensor(const std::string &name) const;
0115 
0116    // Add intermediate tensor
0117    void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape);
0118    void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape);
0119    // Add an intermediate dynamic tensor
0120    void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape);
0121 
0122    void AddInputTensorName(std::string name);
0123    void AddOutputTensorNameList(std::vector<std::string> output_tensor_names);
0124    void
0125    UpdateOutputTensorList(std::vector<std::string> curr_output_tensor, std::vector<std::string> modify_output_tensor);
0126    void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape,
0127                                 std::shared_ptr<void> data);
0128    std::shared_ptr<void> GetInitializedTensorData(std::string tensor_name);
0129 
0130    void Initialize(int batchSize = -1, bool verbose = false);
0131    void Initialize(const std::map<std::string,size_t> & inputParams, bool verbose = false);
0132 
0133    void Generate(std::underlying_type_t<Options> options, int batchSize = -1, long pos = 0, bool verbose = false);
0134    void Generate(Options options = Options::kDefault, int batchSize = -1, int pos = 0, bool verbose = false)
0135    {
0136       Generate(static_cast<std::underlying_type_t<Options>>(options), batchSize, pos, verbose);
0137    }
0138    // generate the infer function signature. If isdecl= false generate the calling infer function
0139    // used to infer the sub-graphs
0140    std::string GenerateInferSignature(bool isdecl = true);
0141 
0142    // calculate total intermediate memory and position intermediate tensor addresses
0143    std::string AllocateIntermediateMemory(std::span<const std::string_view> op_output_tensors);
0144    void CheckAndFlushIntermediateMemory(std::span<const std::string_view> op_output_tensors, const size_t& op_idx);
0145 
0146    void SetOptimizationLevel(const OptimizationLevel &optim_level) { fOptimizationLevel = optim_level; }
0147 
0148 protected:
0149    // internal functions
0150    // generate code for the initialized tensors
0151    void GenerateInitializedTensorInfo();
0152    // generate code for the intermediate tensors
0153    void GenerateIntermediateTensorInfo();
0154    // generate code for the dynamic tensors
0155    void GenerateDynamicTensorInfo();
0156    // generate code for declarations needed by operators
0157    void GenerateOperatorDeclarations();
0158    // generate code for inference
0159    void GenerateOutput();
0160    // generate code for initializing memory pool for intermediate tensors
0161    void GenerateIntermediateMemoryPool();
0162    // Generate all session code
0163    void GenerateSessionCode();
0164 
0165 public:
0166    const std::vector<std::string> &GetInputTensorNames() const { return fInputTensorNames; }
0167    const std::vector<std::string> &GetOutputTensorNames() const { return fOutputTensorNames; }
0168 
0169    void ReadInitializedTensorsFromFile(long);
0170    long WriteInitializedTensorsToFile(std::string filename = "");
0171 
0172    void PrintIntermediateTensors();
0173    void PrintOutputTensors();
0174    void OutputGenerated(std::string filename = "", bool append = false);
0175    std::vector<std::string> GetOutputTensorNames() { return fOutputTensorNames; }
0176    void SetFilename(std::string filename) { fName = filename; }
0177 
0178    /*
0179       template <typename T>
0180       void AddInitializedTensor(std::string tensor_name, RTensor<T> new_tensor){
0181          //a view only
0182          T obj;
0183          if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()){
0184             throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
0185          }
0186          InitializedTensor new_tensor_ {GetTemplatedType(obj), new_tensor.GetShape() ,
0187       static_cast<void>(new_tensor.GetData())}; fInitializedTensors[tensor_name] = new_tensor_;
0188       }
0189    */
0190 
0191    void PrintRequiredInputTensors();
0192    void PrintInitializedTensors();
0193    void PrintDynamicTensors();
0194    void HeadInitializedTensors(std::string name, int n_print = 50);
0195 
0196    bool UseSession() const { return fUseSession; }
0197 
0198    // Use the ClassDef macro to allow definition of custom streaming
0199    ClassDefNV(RModel, 3);
0200 };
0201 
0202 } // namespace SOFIE
0203 } // namespace Experimental
0204 } // namespace TMVA
0205 
0206 #endif // TMVA_SOFIE_RMODEL