// /include/root/TMVA/RModel.hxx

#ifndef TMVA_SOFIE_RMODEL
#define TMVA_SOFIE_RMODEL

#include "TMVA/RModel_Base.hxx"
#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"

// Standard-library headers for the types used directly below
// (most of them are also reachable through the TMVA headers above)
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include <span>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

namespace TMVA {
namespace Experimental {
namespace SOFIE {

class RModel final : public RModel_Base {

private:
   bool fIsInitialized = false;
   bool fIsSubGraph = false;
   int fVerbose = 0;
   int fBatchSize = -1;
   long fReadPos = 0;  // reading file position
   size_t fConstantTensorSize = 0; // size (in bytes) of the allocated constant tensors
   size_t fWeightsTensorSize = 0;  // size (in bytes) of the allocated weight tensors
   size_t fOtherTensorSize = 0;    // size (in bytes) of the intermediate tensors that are not managed by the memory pool

   OptimizationLevel fOptimizationLevel = OptimizationLevel::kExtended;

   std::unordered_map<std::string, InputTensorInfo> fInputTensorInfos; // input tensors whose shape is not fully defined, or other graph inputs
   std::unordered_map<std::string, TensorInfo> fReadyInputTensorInfos; // input tensors whose shape is fully defined
   std::unordered_map<std::string, InitializedTensor> fInitializedTensors;
   std::unordered_map<std::string, TensorInfo> fIntermediateTensorInfos;
   std::unordered_map<std::string, DynamicTensorInfo> fDynamicTensorInfos;
   std::unordered_map<std::string, std::pair<std::vector<Dim>, bool>> fShapeTensors; // constant tensors describing a shape
   std::unordered_map<std::string, std::string> fShapeParams; // parameters defining the dynamic shapes (e.g. the batch size), together with their default values
   std::vector<std::string> fDimShapeNames; // parameter names used to define the shapes
   std::vector<std::string> fOutputTensorNames;
   std::vector<std::string> fInputTensorNames; // input tensor names, in ONNX order

   std::vector<std::unique_ptr<ROperator>> fOperators;

   std::vector<std::shared_ptr<RModel>> fSubGraphs;    ///<!  sub-graph models (transient)
   RModel * fParentGraph = nullptr;

   // memory pool information for the intermediate tensors
   MemoryPoolInfo fIntermediateMemoryInfo;    ///<!  intermediate memory info (transient)
   std::unordered_map<std::string_view, size_t> fIntermediateTensorFrequencyLookup;    ///<!  lookup table for the intermediate tensor frequency (transient)

public:
   /**
       Default constructor. Needed to allow serialization of ROOT objects. See
       https://root.cern/manual/io_custom_classes/#restrictions-on-types-root-io-can-handle
   */
   RModel() = default;
   RModel(std::string name, std::string parsedtime) : RModel_Base(name, parsedtime) {}

   // constructor used by the GNN functions
   RModel(std::string function_name) : RModel_Base(function_name) {}

   int Verbose() const { return fVerbose; }

   std::vector<size_t> GetTensorShape(const std::string & name) const;
   std::vector<Dim> GetDimTensorShape(const std::string & name) const;
   std::vector<Dim> GetDynamicTensorShape(const std::string & name) const;

   // get the values of the tensor representing a shape
   const std::vector<Dim> & GetShapeTensorValues(const std::string & tensor_name) const;

   ETensorType GetTensorType(std::string name) const;

   bool CheckIfTensorAlreadyExist(std::string tensor_name);
   void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape);
   void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape);
   void AddOperator(std::unique_ptr<ROperator> op, int order_execution = -1);
   void AddOperatorReference(ROperator *op, int order_execution = -1)
   {
      std::unique_ptr<ROperator> tmp(op);
      AddOperator(std::move(tmp), order_execution);
   }
   void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape,
                             std::shared_ptr<void> data);
   void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape,
                          std::shared_ptr<void> data);

   template<class T>
   void AddConstantTensor(const std::string & name, const std::vector<size_t> & shape, const T * data) {
      size_t length = ConvertShapeToLength(shape);
      std::shared_ptr<void> data_ptr(malloc(length * sizeof(T)), free);
      std::memcpy(data_ptr.get(), data, length * sizeof(T));
      AddConstantTensor(name, GetTemplatedType<T>(T()), shape, data_ptr);
   }
   // for booleans it is more convenient to pass an std::vector
   template<class T>
   void AddConstantTensor(const std::string & name, const std::vector<size_t> & shape, const std::vector<T> & data) {
      size_t length = data.size();
      std::shared_ptr<void> data_ptr(malloc(length * sizeof(T)), free);
      std::copy(data.begin(), data.end(), (T*) data_ptr.get());
      AddConstantTensor(name, GetTemplatedType<T>(T()), shape, data_ptr);
   }

   template <typename T>
   void AddInitializedTensor(const std::string & tensor_name, const std::vector<std::size_t> & shape, T *raw_data)
   {
      size_t size = ConvertShapeToLength(shape);
      std::shared_ptr<void> data(malloc(size * sizeof(T)), free);
      std::memcpy(data.get(), raw_data, size * sizeof(T));
      AddInitializedTensor(tensor_name, GetTemplatedType(T()), shape, data);
   }
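
   // Illustrative usage sketch (not part of the original header): the templated
   // AddConstantTensor / AddInitializedTensor overloads above copy the caller's data into a
   // buffer owned by the model, so the source buffer does not have to outlive the model.
   // The model name, tensor names and values below are hypothetical.
   //
   //    RModel model("MyModel", "2024-01-01 12:00:00");
   //    std::vector<float> weights = {1.f, 2.f, 3.f, 4.f};
   //    std::vector<float> bias    = {0.1f, 0.2f};
   //    model.AddConstantTensor<float>("W", {2, 2}, weights);      // copies the vector content
   //    model.AddInitializedTensor<float>("B", {2}, bias.data());  // copies the raw buffer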

   void AddShapeTensor(const std::string & name, const std::vector<Dim> & shapeValues, bool scalar = false);

   // add and initialize a sub-graph of the model
   void InitializeSubGraph(std::shared_ptr<RModel> graph);

   // set a flag indicating that a tensor does not need to be written to the weight file
   // (e.g. shape tensors used as input to define a shape, as in Reshape)
   void SetNotWritableInitializedTensor(const std::string & tensor_name);

   // Check if a tensor is initialized
   bool IsInitializedTensor(const std::string &name) const;
   // Check if a tensor is constant (note that a constant tensor is also initialized)
   bool IsConstantTensor(const std::string &name) const;
   bool IsDynamicTensor(const std::string &name) const;
   // Check if a tensor is a dynamic input tensor (without a fully specified shape, based on the Dim structure)
   bool IsDimInputTensor(const std::string &name) const;
   // Check if a tensor is a fully specified input tensor
   bool IsReadyInputTensor(const std::string &name) const;
   /// Check if a tensor is a shape tensor
   bool IsShapeTensor(const std::string & name) const;

   // Add an intermediate tensor
   void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape);
   void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape);
   // Add an intermediate dynamic tensor
   void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape);
   // Add a shape parameter
   void AddShapeParam(const std::string & name, size_t def_value = 0);
   void AddInputTensorName(std::string name);
   void AddOutputTensorNameList(std::vector<std::string> output_tensor_names);
   void UpdateOutputTensorList(std::vector<std::string> curr_output_tensor, std::vector<std::string> modify_output_tensor);
   void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape,
                                std::shared_ptr<void> data);
   std::shared_ptr<void> GetInitializedTensorData(std::string tensor_name);

   template<class T>
   std::vector<T> GetTensorData(const std::string & name);

   void Initialize(int batchSize = -1, bool verbose = false);
   void Initialize(const std::map<std::string, size_t> & inputParams, bool verbose = false);

   void Generate(std::underlying_type_t<Options> options, int batchSize = -1, long pos = 0, bool verbose = false);
   void Generate(Options options = Options::kDefault, int batchSize = -1, int pos = 0, bool verbose = false)
   {
      Generate(static_cast<std::underlying_type_t<Options>>(options), batchSize, pos, verbose);
   }
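
   // Usage sketch (illustrative, not part of the original header): a typical code-generation
   // flow, assuming the model was built by ROOT's ONNX parser (RModelParser_ONNX, declared in
   // TMVA/RModelParser_ONNX.hxx) as in the SOFIE tutorials. File names are hypothetical.
   //
   //    RModelParser_ONNX parser;
   //    RModel model = parser.Parse("Linear_16.onnx");
   //    model.Generate(Options::kDefault, 1);        // batch size 1
   //    model.OutputGenerated("Linear_16.hxx");      // write the generated inference code
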
   // Generate the infer function signature; if isdecl = false, generate instead the call to the
   // infer function (used to infer the sub-graphs)
   std::string GenerateInferSignature(bool isdecl = true);

   // compute the total intermediate memory and the positions (addresses) of the intermediate tensors
   std::string AllocateIntermediateMemory(std::span<const std::string_view> op_output_tensors);
   void CheckAndFlushIntermediateMemory(std::span<const std::string_view> op_output_tensors, const size_t& op_idx);

   void SetOptimizationLevel(const OptimizationLevel &optim_level) { fOptimizationLevel = optim_level; }

   // get the size in bytes of the constant tensors
   size_t GetConstantTensorSize() const { return fConstantTensorSize; }
   // get the size in bytes of the weight tensors
   size_t GetWeightsTensorSize() const { return fWeightsTensorSize; }
   // get the size in bytes of the intermediate tensors that are not part of the memory pool
   size_t GetOtherTensorSize() const { return fOtherTensorSize; }
   // get the size in bytes of the intermediate tensors managed by the memory pool
   size_t GetIntermediateTensorSize() const {
      return (!fIntermediateMemoryInfo.total_stack.empty())
                ? fIntermediateMemoryInfo.total_stack.rbegin()->first + fIntermediateMemoryInfo.total_stack.rbegin()->second.tensor_size
                : 0;
   }
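
   // Sketch (illustrative, not part of the original header): after code generation the size
   // accessors above can be used to inspect the memory footprint of the model, e.g.
   //
   //    std::cout << "constants:            " << model.GetConstantTensorSize() << " bytes\n"
   //              << "weights:              " << model.GetWeightsTensorSize() << " bytes\n"
   //              << "pooled intermediates: " << model.GetIntermediateTensorSize() << " bytes\n"
   //              << "other intermediates:  " << model.GetOtherTensorSize() << " bytes\n";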

protected:
   // internal functions
   // generate code for the initialized tensors
   void GenerateInitializedTensorInfo();
   // generate code for the intermediate tensors
   void GenerateIntermediateTensorInfo();
   // generate code for the dynamic tensors
   void GenerateDynamicTensorInfo();
   // generate code for the declarations needed by the operators
   void GenerateOperatorDeclarations();
   // generate code for the inference
   void GenerateOutput();
   // generate code initializing the memory pool for the intermediate tensors
   void GenerateIntermediateMemoryPool();
   // generate all the session code
   void GenerateSessionCode();

public:
   const std::vector<std::string> & GetInputTensorNames() const { return fInputTensorNames; }
   const std::vector<std::string> & GetOutputTensorNames() const { return fOutputTensorNames; }
   const std::vector<std::string> & GetDimShapeNames() const { return fDimShapeNames; }

   void ReadInitializedTensorsFromFile(long);
   long WriteInitializedTensorsToFile(std::string filename = "");

   void PrintIntermediateTensors();
   void PrintOutputTensors();
   void OutputGenerated(std::string filename = "", bool append = false);
   std::vector<std::string> GetOutputTensorNames() { return fOutputTensorNames; }
   void SetFilename(std::string filename) { fName = filename; }

   /*
      template <typename T>
      void AddInitializedTensor(std::string tensor_name, RTensor<T> new_tensor){
         //a view only
         T obj;
         if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()){
            throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
         }
         InitializedTensor new_tensor_ {GetTemplatedType(obj), new_tensor.GetShape() ,
      static_cast<void>(new_tensor.GetData())}; fInitializedTensors[tensor_name] = new_tensor_;
      }
   */

   void PrintRequiredInputTensors();
   void PrintInitializedTensors();
   void PrintDynamicTensors();
   void HeadInitializedTensors(std::string name, int n_print = 50);

   bool UseSession() const { return fUseSession; }

   // Use the ClassDef macro to allow the definition of custom streaming
   ClassDefNV(RModel, 3);
};

// The templated member functions and their specializations need to be implemented here.

template<class T>
inline std::vector<T> RModel::GetTensorData(const std::string & name) {
   if (!IsInitializedTensor(name)) return std::vector<T>{};
   T * data = static_cast<T*>(GetInitializedTensorData(name).get());
   size_t size = ConvertShapeToLength(GetTensorShape(name));
   return std::vector<T>(data, data + size);
}

template<>
inline std::vector<Dim> RModel::GetTensorData<Dim>(const std::string & name) {
   if (!IsShapeTensor(name)) return std::vector<Dim>{};
   return GetShapeTensorValues(name);
}
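
// Illustrative example (not part of the original header): GetTensorData returns a copy of the
// values of an initialized tensor (an empty vector if the name does not match one), while the
// Dim specialization returns the values of a shape tensor. The tensor names are hypothetical.
//
//    std::vector<float> w     = model.GetTensorData<float>("W");    // values of the initialized tensor "W"
//    std::vector<Dim>   shape = model.GetTensorData<Dim>("shape");  // values of the shape tensor "shape"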

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA

#endif // TMVA_SOFIE_RMODEL