#include <fmt/core.h>
#include <onnxruntime_c_api.h>
#include <onnxruntime_cxx_api.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <gsl/pointers>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

#include "InclusiveKinematicsML.h"

namespace eicrecon {

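// Format a tensor shape for logging, e.g. {1, 4} becomes "1x4".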
static std::string print_shape(const std::vector<std::int64_t>& v) {
  if (v.empty()) {
    return "";
  }
  std::stringstream ss;
  for (std::size_t i = 0; i < v.size() - 1; i++) {
    ss << v[i] << "x";
  }
  ss << v[v.size() - 1];
  return ss.str();
}

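// Wrap a caller-owned buffer as an ONNX Runtime tensor without copying.
// Example (hypothetical values):
//   std::vector<float> data{0.1f, 0.2f, 0.3f, 0.4f};
//   auto tensor = vec_to_tensor<float>(data, {1, 4});
// The Ort::Value references data.data(), so `data` must outlive the tensor.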
template <typename T>
Ort::Value vec_to_tensor(std::vector<T>& data, const std::vector<std::int64_t>& shape) {
  Ort::MemoryInfo mem_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator,
                                                        OrtMemType::OrtMemTypeDefault);
  auto tensor =
      Ort::Value::CreateTensor<T>(mem_info, data.data(), data.size(), shape.data(), shape.size());
  return tensor;
}

void InclusiveKinematicsML::init() {
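  // ONNX Runtime setup: environment plus a single-threaded session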
  m_env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "inclusive-kinematics-ml");
  Ort::SessionOptions session_options;
  session_options.SetInterOpNumThreads(1);
  session_options.SetIntraOpNumThreads(1);
  try {
    m_session = Ort::Session(m_env, m_cfg.modelPath.c_str(), session_options);

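    // query and log the name and shape of each model input node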
    Ort::AllocatorWithDefaultOptions allocator;
    debug("Input Node Name/Shape:");
    for (std::size_t i = 0; i < m_session.GetInputCount(); i++) {
      m_input_names.emplace_back(m_session.GetInputNameAllocated(i, allocator).get());
      if (m_session.GetInputTypeInfo(i).GetONNXType() == ONNX_TYPE_TENSOR) {
        m_input_shapes.emplace_back(
            m_session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
        debug("\t{} : {}", m_input_names.at(i), print_shape(m_input_shapes.at(i)));
      } else {
        m_input_shapes.emplace_back();
        debug("\t{} : not a tensor", m_input_names.at(i));
      }
    }

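    // query and log the name and shape of each model output node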
    debug("Output Node Name/Shape:");
    for (std::size_t i = 0; i < m_session.GetOutputCount(); i++) {
      m_output_names.emplace_back(m_session.GetOutputNameAllocated(i, allocator).get());
      if (m_session.GetOutputTypeInfo(i).GetONNXType() == ONNX_TYPE_TENSOR) {
        m_output_shapes.emplace_back(
            m_session.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
        debug("\t{} : {}", m_output_names.at(i), print_shape(m_output_shapes.at(i)));
      } else {
        m_output_shapes.emplace_back();
        debug("\t{} : not a tensor", m_output_names.at(i));
      }
    }

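    // Run() takes arrays of C strings; these pointers are backed by the
    // std::string vectors above, which live as long as this algorithm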
    m_input_names_char.resize(m_input_names.size(), nullptr);
    std::ranges::transform(m_input_names, std::begin(m_input_names_char),
                           [](const std::string& str) { return str.c_str(); });
    m_output_names_char.resize(m_output_names.size(), nullptr);
    std::ranges::transform(m_output_names, std::begin(m_output_names_char),
                           [](const std::string& str) { return str.c_str(); });

  } catch (const std::exception& e) {
    error("{}", e.what());
  }
}

void InclusiveKinematicsML::process(const InclusiveKinematicsML::Input& input,
                                    const InclusiveKinematicsML::Output& output) const {
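  // unpack the electron- and double-angle-method kinematics and the output collection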
  const auto [electron, da] = input;
  auto [ml] = output;

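  // require at least one reconstructed candidate from each method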
  if (electron->empty() || da->empty()) {
    debug("skipping because input collections have no entries");
    return;
  }

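  // this algorithm only supports models with a single input and a single output node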
  if (m_input_names.size() != 1 || m_output_names.size() != 1) {
    debug("skipping because model does not have exactly one input and one output");
    return;
  }

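  // assemble the model input: x as reconstructed by the electron method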
  std::vector<float> input_tensor_values;
  std::vector<Ort::Value> input_tensors;
  for (auto&& i : *electron) {
    input_tensor_values.push_back(i.getX());
  }
  input_tensors.emplace_back(vec_to_tensor<float>(input_tensor_values, m_input_shapes.front()));

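  // double-check the type and dimensions of the input tensor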
  if (!input_tensors[0].IsTensor() ||
      input_tensors[0].GetTensorTypeAndShapeInfo().GetShape() != m_input_shapes.front()) {
    debug("skipping because input tensor shape incorrect");
    return;
  }

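  // run the inference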
  try {
    auto output_tensors = m_session.Run(Ort::RunOptions{nullptr}, m_input_names_char.data(),
                                        input_tensors.data(), m_input_names_char.size(),
                                        m_output_names_char.data(), m_output_names_char.size());

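    // double-check the number and type of the returned output tensors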
    if (output_tensors.size() != m_output_names.size() || !output_tensors[0].IsTensor()) {
      debug("skipping because output tensor count or type incorrect");
      return;
    }

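    // the first element of the output tensor is the inferred x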
    auto* output_tensor_data = output_tensors[0].GetTensorMutableData<float>();
    auto x = output_tensor_data[0];
    auto kin = ml->create();
    kin.setX(x);

  } catch (const Ort::Exception& exception) {
    error("error running model inference: {}", exception.what());
  }
}

} // namespace eicrecon