#ifndef TMVA_SOFIE_ROPERATOR_RANGE
#define TMVA_SOFIE_ROPERATOR_RANGE

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <sstream>
#include <algorithm>
#include <cmath>        // std::ceil
#include <cstdint>      // int64_t
#include <type_traits>  // std::is_same

namespace TMVA{
namespace Experimental{
namespace SOFIE{
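
// Implementation of the ONNX Range operator.
// Given scalar inputs start, limit and delta, it produces a 1-D output tensor with
// output[i] = start + i * delta for i in [0, number_of_elements), where
// number_of_elements = max(ceil((limit - start) / delta), 0).
// If all three inputs are known at compile time the output is folded into a constant
// tensor; otherwise the output length is only known at run time and the output is
// registered as a dynamic tensor.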
template <typename T>
class ROperator_Range final : public ROperator
{
private:

   std::string fNStart;     // name of the input tensor holding the start value
   std::string fNLimit;     // name of the input tensor holding the limit value
   std::string fNDelta;     // name of the input tensor holding the step (delta) value
   std::string fNOutput;    // name of the output tensor
   std::vector<Dim> fShape; // dynamic shape of the output tensor
   std::string fType;       // output type as a string ("float" or "int64_t")

public:
   ROperator_Range(){}

   ROperator_Range(std::string start, std::string limit, std::string delta, std::string nameOutput):
      fNStart(start), fNLimit(limit), fNDelta(delta),
      fNOutput(UTILITY::Clean_name(nameOutput)) {
      if (std::is_same<T, float>::value) {
         fType = "float";
      } else if (std::is_same<T, int64_t>::value) {
         fType = "int64_t";
      }
      static_assert( (std::is_same_v<T, float> || std::is_same_v<T, int64_t>),
                     "TMVA::SOFIE - Unsupported type by Range operator");
   }

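   // The output type follows the input type. The true output length is only known
   // at run time, so ShapeInference simply forwards the input shapes unchanged.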
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override {
      return input;
   }

   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override {
      auto ret = input;
      return ret;
   }

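   // Checks that the three input tensors exist in the model and either constant-folds
   // the output (when start, limit and delta are all initialized tensors) or registers
   // the output as a dynamic tensor of unknown length.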
   void Initialize(RModel& model) override {

      if (!model.CheckIfTensorAlreadyExist(fNStart)) {
         throw std::runtime_error("TMVA SOFIE Range Op Input Tensor " + fNStart + " is not found in model");
      }
      if (!model.CheckIfTensorAlreadyExist(fNLimit)) {
         throw std::runtime_error("TMVA SOFIE Range Op Input Tensor " + fNLimit + " is not found in model");
      }
      if (!model.CheckIfTensorAlreadyExist(fNDelta)) {
         throw std::runtime_error("TMVA SOFIE Range Op Input Tensor " + fNDelta + " is not found in model");
      }
      ETensorType type = ConvertStringToType(fType);
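      // Constant folding: if start, limit and delta are all initialized (compile-time)
      // tensors, compute the output now and store it as a constant tensor, so no code
      // needs to be generated for this operator.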
      if (model.IsInitializedTensor(fNStart) && model.IsInitializedTensor(fNDelta) && model.IsInitializedTensor(fNLimit)) {
         T * start = static_cast<T*>(model.GetInitializedTensorData(fNStart).get());
         T * limit = static_cast<T*>(model.GetInitializedTensorData(fNLimit).get());
         T * delta = static_cast<T*>(model.GetInitializedTensorData(fNDelta).get());
         if (!start || !delta || !limit)
            throw std::runtime_error("TMVA SOFIE Range Op Input Tensor has invalid input data");
         T a = *start;
         T b = *limit;
         T d = *delta;
         // number of elements as defined by ONNX Range: max(ceil((limit - start) / delta), 0).
         // Cast to double before dividing to avoid integer truncation when T is int64_t.
         int number_of_elements = static_cast<int>(std::max(std::ceil(static_cast<double>(b - a) / static_cast<double>(d)), 0.));
         std::vector<T> output(number_of_elements);
         for (int i = 0; i < number_of_elements; ++i) {
            output[i] = a + (i * d);
         }
         std::vector<size_t> shape = {static_cast<size_t>(number_of_elements)};
         model.AddConstantTensor(fNOutput, shape, output.data());
         fIsOutputConstant = true;
         // the inputs are no longer needed in the generated code
         model.SetNotWritableInitializedTensor(fNStart);
         model.SetNotWritableInitializedTensor(fNDelta);
         model.SetNotWritableInitializedTensor(fNLimit);
      }
      else {
         // output length is not known at compile time: register a dynamic output tensor
         // whose size is a run-time parameter named "range_size"
         fShape = {Dim{"range_size"}};
         model.AddDynamicTensor(fNOutput, type, fShape);
      }
      if (model.Verbose()) {
         std::cout << "Range -> output is " << fNOutput << " ";
         if (fIsOutputConstant) std::cout << ConvertShapeToString(model.GetTensorShape(fNOutput)) << std::endl;
         else std::cout << ConvertDynamicShapeToString(model.GetDynamicTensorShape(fNOutput)) << std::endl;
      }
   }

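   // Generates the inference code for the non-constant case: the number of output
   // elements is computed at run time from the current values of the start, limit and
   // delta tensors, and the output buffer is resized if needed.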
   std::string Generate(std::string OpName) override {

      std::stringstream out;
      out << "\n//------ Range\n";
      if (fIsOutputConstant) return out.str();

      OpName = "op_" + OpName;
      if (fShape.empty()) {
         throw std::runtime_error("TMVA SOFIE Range operator called to Generate without being initialized first");
      }

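      // Emit code that computes the output length at run time following the ONNX Range
      // definition, max(ceil((limit - start) / delta), 0), from the current tensor values.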
      std::string sizeName = fShape[0].param;
      out << SP << "size_t " << sizeName << " = static_cast<size_t>(std::max(std::ceil((static_cast<float>(*tensor_" << fNLimit << ") - static_cast<float>(*tensor_" << fNStart << ")) / static_cast<float>(*tensor_" << fNDelta << ")), 0.0f));\n";
      out << SP << "if (" << sizeName << " > fTensor_" << fNOutput << ".size()) {\n";
      out << SP << SP << "fTensor_" << fNOutput << ".resize(" << sizeName << ");\n";
      // refresh the data pointer after a possible reallocation of the output buffer
      out << SP << SP << "tensor_" << fNOutput << " = fTensor_" << fNOutput << ".data();\n";
      out << SP << "}\n";
      out << SP << "for (size_t i = 0; i < " << sizeName << "; i++) {\n";
      out << SP << SP << "fTensor_" << fNOutput << "[i] = *tensor_" << fNStart << " + i * (*tensor_" << fNDelta << ");\n";
      out << SP << "}\n";
      return out.str();
   }
};
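
// Minimal usage sketch (for illustration only, assuming "start", "limit", "delta" and
// "output" are names of scalar tensors already registered in the RModel by the parser):
//
//    auto op = std::make_unique<ROperator_Range<float>>("start", "limit", "delta", "output");
//    op->Initialize(model);            // folds to a constant or registers a dynamic tensor
//    std::string code = op->Generate("0");  // returns only a comment header if constant-folded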

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA

#endif // TMVA_SOFIE_ROPERATOR_RANGE