Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-05-10 08:43:19

0001 //===- TensorSpec.h - type descriptor for a tensor --------------*- C++ -*-===//
0002 //
0003 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0004 // See https://llvm.org/LICENSE.txt for license information.
0005 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0006 //
0007 //===----------------------------------------------------------------------===//
0008 //
0009 #ifndef LLVM_ANALYSIS_TENSORSPEC_H
0010 #define LLVM_ANALYSIS_TENSORSPEC_H
0011 
0012 #include "llvm/Config/llvm-config.h"
0013 
0014 #include "llvm/ADT/StringMap.h"
0015 #include "llvm/IR/LLVMContext.h"
0016 
0017 #include <memory>
0018 #include <optional>
0019 #include <vector>
0020 
0021 namespace llvm {
0022 namespace json {
0023 class OStream;
0024 class Value;
0025 } // namespace json
0026 
0027 /// TensorSpec encapsulates the specification of a tensor: its dimensions, or
0028 /// "shape" (row-major), its type (see TensorSpec::getDataType specializations
0029 /// for supported types), its name and port (see "TensorFlow: Large-Scale
0030 /// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
0031 /// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
0032 ///
0033 /// Note that the design is motivated by Tensorflow, but it is not intended to
0034 /// be Tensorflow-specific.
0035 ///
0036 /// Known tensor types. The left part is the C type, the
0037 /// right is a name we can use to identify the type (to implement TensorSpec
0038 /// equality checks), and to use, if needed, when mapping to an underlying
0039 /// evaluator's type system. The main requirement is that the C type we use has
0040 /// the same size and encoding (e.g. endian-ness) as the one used by the
0041 /// evaluator.
// X-macro: each entry pairs a C element type with its TensorType enumerator
// name. Expand by passing a two-argument macro M(CType, Name); used below to
// generate the TensorType enumerators and the getDataType<T> declarations.
#define SUPPORTED_TENSOR_TYPES(M)                                              \
  M(float, Float)                                                              \
  M(double, Double)                                                            \
  M(int8_t, Int8)                                                              \
  M(uint8_t, UInt8)                                                            \
  M(int16_t, Int16)                                                            \
  M(uint16_t, UInt16)                                                          \
  M(int32_t, Int32)                                                            \
  M(uint32_t, UInt32)                                                          \
  M(int64_t, Int64)                                                            \
  M(uint64_t, UInt64)
0053 
/// Element type of a tensor, one enumerator per SUPPORTED_TENSOR_TYPES entry,
/// bracketed by two sentinels: Invalid (value 0, the default/unset state) and
/// Total (one past the last valid type, usable as an iteration/array bound).
enum class TensorType {
  Invalid,
#define _TENSOR_TYPE_ENUM_MEMBERS(_, Name) Name,
  SUPPORTED_TENSOR_TYPES(_TENSOR_TYPE_ENUM_MEMBERS)
#undef _TENSOR_TYPE_ENUM_MEMBERS
      Total
};
0061 
class TensorSpec final {
public:
  /// Create a spec for a tensor of element type T with the given name, shape
  /// (row-major) and port. The element byte size is derived from sizeof(T)
  /// and the TensorType from the getDataType<T> specialization.
  template <typename T>
  static TensorSpec createSpec(const std::string &Name,
                               const std::vector<int64_t> &Shape,
                               int Port = 0) {
    return TensorSpec(Name, Port, getDataType<T>(), sizeof(T), Shape);
  }

  // Accessors for the immutable spec fields.
  const std::string &name() const { return Name; }
  int port() const { return Port; }
  TensorType type() const { return Type; }
  const std::vector<int64_t> &shape() const { return Shape; }

  /// Two specs are equal iff name, port, type and shape all match.
  /// (ElementCount/ElementSize are derived, so they need not be compared.)
  bool operator==(const TensorSpec &Other) const {
    return Name == Other.Name && Port == Other.Port && Type == Other.Type &&
           Shape == Other.Shape;
  }

  bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }

  /// Get the number of elements in a tensor with this shape.
  size_t getElementCount() const { return ElementCount; }
  /// Get the size, in bytes, of one element.
  size_t getElementByteSize() const { return ElementSize; }
  /// Get the total size of a memory buffer needed to store the whole tensor.
  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }

  /// True iff this spec's element type is the one mapped from C type T.
  template <typename T> bool isElementType() const {
    return getDataType<T>() == Type;
  }

  /// Copy Other's port, type, element size and shape, but under a new name.
  TensorSpec(const std::string &NewName, const TensorSpec &Other)
      : TensorSpec(NewName, Other.Port, Other.Type, Other.ElementSize,
                   Other.Shape) {}

  /// Serialize this spec as a JSON object (see getTensorSpecFromJSON below
  /// for the expected field layout).
  void toJSON(json::OStream &OS) const;

private:
  // Out-of-line constructor; presumably also computes ElementCount from the
  // product of Shape's dimensions (definition not in this header) — confirm
  // in the corresponding .cpp.
  TensorSpec(const std::string &Name, int Port, TensorType Type,
             size_t ElementSize, const std::vector<int64_t> &Shape);

  // Maps C type T to its TensorType enumerator; only the specializations
  // declared via SUPPORTED_TENSOR_TYPES below exist.
  template <typename T> static TensorType getDataType();

  std::string Name;
  int Port = 0;
  TensorType Type = TensorType::Invalid;
  std::vector<int64_t> Shape;
  size_t ElementCount = 0; // derived from Shape
  size_t ElementSize = 0;  // sizeof one element, from createSpec<T>
};
0113 
/// For debugging: render the tensor stored in Buffer (interpreted per Spec's
/// element type and count) as a human-readable string.
std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);

/// Construct a TensorSpec from a JSON dictionary of the form:
/// { "name": <string>,
///   "port": <int>,
///   "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
///   "shape": <array of ints> }
/// For the "type" field, see the C++ primitive types used in
/// SUPPORTED_TENSOR_TYPES. Returns std::nullopt on malformed input (errors
/// presumably reported via Ctx — confirm in the implementation).
std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                                const json::Value &Value);
0126 
// Declare an explicit specialization of TensorSpec::getDataType<T> for each
// supported C type; the definitions live outside this header. The helper
// macro is scoped to this expansion via the #undef below.
#define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
  template <> TensorType TensorSpec::getDataType<T>();
SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_DEF)

#undef TFUTILS_GETDATATYPE_DEF
0132 } // namespace llvm
0133 
0134 #endif // LLVM_ANALYSIS_TENSORSPEC_H