Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-31 10:12:20

0001 // Protocol Buffers - Google's data interchange format
0002 // Copyright 2008 Google Inc.  All rights reserved.
0003 //
0004 // Use of this source code is governed by a BSD-style
0005 // license that can be found in the LICENSE file or at
0006 // https://developers.google.com/open-source/licenses/bsd
0007 
0008 // Author: kenton@google.com (Kenton Varda)
0009 //  Based on original Protocol Buffers design by
0010 //  Sanjay Ghemawat, Jeff Dean, and others.
0011 //
0012 // This header is logically internal, but is made public because it is used
0013 // from protocol-compiler-generated code, which may reside in other components.
0014 
0015 #ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_REFLECTION_H__
0016 #define GOOGLE_PROTOBUF_GENERATED_MESSAGE_REFLECTION_H__
0017 
0018 #include <atomic>
0019 #include <cstddef>
0020 #include <cstdint>
0021 #include <string>
0022 
0023 #include "absl/base/call_once.h"
0024 #include "absl/log/absl_check.h"
0025 #include "google/protobuf/descriptor.h"
0026 #include "google/protobuf/generated_enum_reflection.h"
0027 #include "google/protobuf/port.h"
0028 #include "google/protobuf/unknown_field_set.h"
0029 
0030 // Must be included last.
0031 #include "google/protobuf/port_def.inc"
0032 
0033 #ifdef SWIG
0034 #error "You cannot SWIG proto headers"
0035 #endif
0036 
0037 namespace google {
0038 namespace protobuf {
0039 class MapKey;
0040 class MapValueRef;
0041 class MessageLayoutInspector;
0042 class Message;
0043 struct Metadata;
0044 
0045 namespace io {
0046 class CodedOutputStream;
0047 }
0048 }  // namespace protobuf
0049 }  // namespace google
0050 
0051 namespace google {
0052 namespace protobuf {
0053 namespace internal {
0054 class DefaultEmptyOneof;
0055 // Defined in other files.
0056 class ExtensionSet;  // extension_set.h
0057 class WeakFieldMap;  // weak_field_map.h
0058 
0059 // Tag used on offsets for fields that don't have a real offset.
0060 // For example, weak message fields go into the WeakFieldMap and not in an
0061 // actual field.
0062 constexpr uint32_t kInvalidFieldOffsetTag = 0x40000000u;
0063 
0064 // Mask used on offsets for split fields.
0065 constexpr uint32_t kSplitFieldOffsetMask = 0x80000000u;
0066 constexpr uint32_t kLazyMask = 0x1u;
0067 constexpr uint32_t kInlinedMask = 0x1u;
0068 
0069 // This struct describes the internal layout of the message, hence this is
0070 // used to act on the message reflectively.
0071 //   default_instance:  The default instance of the message.  This is only
0072 //                  used to obtain pointers to default instances of embedded
0073 //                  messages, which GetMessage() will return if the particular
0074 //                  sub-message has not been initialized yet.  (Thus, all
0075 //                  embedded message fields *must* have non-null pointers
0076 //                  in the default instance.)
0077 //   offsets:       An array of ints giving the byte offsets.
0078 //                  For each oneof or weak field, the offset is relative to the
0079 //                  default_instance. These can be computed at compile time
0080 //                  using the
0081 //                  PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET()
0082 //                  macro. For each non-oneof field, the offset is relative to
0083 //                  the start of the message object.  These can be computed at
0084 //                  compile time using the
0085 //                  PROTO2_GENERATED_MESSAGE_FIELD_OFFSET() macro.
0086 //                  Besides offsets for all fields, this array also contains
0087 //                  offsets for oneof unions. The offset of the i-th oneof union
0088 //                  is offsets[descriptor->field_count() + i].
0089 //   has_bit_indices:  Mapping from field indexes to their index in the has
0090 //                  bit array.
0091 //   has_bits_offset:  Offset in the message of an array of uint32s of size
0092 //                  descriptor->field_count()/32, rounded up.  This is a
0093 //                  bitfield where each bit indicates whether or not the
0094 //                  corresponding field of the message has been initialized.
0095 //                  The bit for field index i is obtained by the expression:
0096 //                    has_bits[i / 32] & (1 << (i % 32))
0097 //   unknown_fields_offset:  Offset in the message of the UnknownFieldSet for
0098 //                  the message.
0099 //   extensions_offset:  Offset in the message of the ExtensionSet for the
0100 //                  message, or -1 if the message type has no extension
0101 //                  ranges.
0102 //   oneof_case_offset:  Offset in the message of an array of uint32s of
0103 //                  size descriptor->oneof_decl_count().  Each uint32_t
0104 //                  indicates what field is set for each oneof.
0105 //   object_size:   The size of a message object of this type, as measured
0106 //                  by sizeof().
0107 //   arena_offset:  If a message doesn't have a unknown_field_set that stores
0108 //                  the arena, it must have a direct pointer to the arena.
0109 //   weak_field_map_offset: If the message proto has weak fields, this is the
0110 //                  offset of _weak_field_map_ in the generated proto. Otherwise
0111 //                  -1.
struct ReflectionSchema {
 public:
  // Size of a google::protobuf::Message object of this type.
  uint32_t GetObjectSize() const { return static_cast<uint32_t>(object_size_); }

  // True iff `field` is a member of a real (non-synthetic) oneof.
  bool InRealOneof(const FieldDescriptor* field) const {
    return field->real_containing_oneof();
  }

  // Offset of a non-oneof field.  Getting a field offset is slightly more
  // efficient when we know statically that it is not a oneof field.
  uint32_t GetFieldOffsetNonOneof(const FieldDescriptor* field) const {
    ABSL_DCHECK(!InRealOneof(field));
    return OffsetValue(offsets_[field->index()], field->type());
  }

  // Offset of any field.  For a real-oneof field the entry used is the oneof
  // union's slot, stored after all per-field offsets (see the layout comment
  // above this struct: offsets[field_count() + oneof_index]).
  uint32_t GetFieldOffset(const FieldDescriptor* field) const {
    if (InRealOneof(field)) {
      size_t offset =
          static_cast<size_t>(field->containing_type()->field_count()) +
          field->containing_oneof()->index();
      return OffsetValue(offsets_[offset], field->type());
    } else {
      return GetFieldOffsetNonOneof(field);
    }
  }

  // Whether the offset entry for `field` carries the "inlined" tag bit
  // (only meaningful for string/bytes fields; see Inlined()).
  bool IsFieldInlined(const FieldDescriptor* field) const {
    return Inlined(offsets_[field->index()], field->type());
  }

  // Byte offset of the uint32_t oneof-case slot for this oneof: the case
  // array starts at oneof_case_offset_ and holds one uint32_t per oneof.
  uint32_t GetOneofCaseOffset(const OneofDescriptor* oneof_descriptor) const {
    return static_cast<uint32_t>(oneof_case_offset_) +
           static_cast<uint32_t>(
               static_cast<size_t>(oneof_descriptor->index()) *
               sizeof(uint32_t));
  }

  // Whether this message type stores has-bits (-1 offset means "none").
  bool HasHasbits() const { return has_bits_offset_ != -1; }

  // Bit index within the bit array of hasbits.  Bit order is low-to-high.
  // Returns uint32_t(-1) when the message has no hasbits at all.
  uint32_t HasBitIndex(const FieldDescriptor* field) const {
    if (has_bits_offset_ == -1) return static_cast<uint32_t>(-1);
    ABSL_DCHECK(HasHasbits());
    return has_bit_indices_[field->index()];
  }

  // Byte offset of the hasbits array.
  uint32_t HasBitsOffset() const {
    ABSL_DCHECK(HasHasbits());
    return static_cast<uint32_t>(has_bits_offset_);
  }

  // Whether this message type has inlined-string donation bits.
  bool HasInlinedString() const { return inlined_string_donated_offset_ != -1; }

  // Bit index within the bit array of _inlined_string_donated_.  Bit order is
  // low-to-high.
  uint32_t InlinedStringIndex(const FieldDescriptor* field) const {
    ABSL_DCHECK(HasInlinedString());
    return inlined_string_indices_[field->index()];
  }

  // Byte offset of the _inlined_string_donated_ array.
  uint32_t InlinedStringDonatedOffset() const {
    ABSL_DCHECK(HasInlinedString());
    return static_cast<uint32_t>(inlined_string_donated_offset_);
  }

  // The offset of the InternalMetadataWithArena member.
  // For Lite this will actually be an InternalMetadataWithArenaLite.
  // The schema doesn't contain enough information to distinguish between
  // these two cases.
  uint32_t GetMetadataOffset() const {
    return static_cast<uint32_t>(metadata_offset_);
  }

  // Whether this message has an ExtensionSet.
  bool HasExtensionSet() const { return extensions_offset_ != -1; }

  // The offset of the ExtensionSet in this message.
  uint32_t GetExtensionSetOffset() const {
    ABSL_DCHECK(HasExtensionSet());
    return static_cast<uint32_t>(extensions_offset_);
  }

  // The offset of the WeakFieldMap when the message contains weak fields.
  // The default is 0 for now.
  int GetWeakFieldMapOffset() const { return weak_field_map_offset_; }

  // True iff `message` is the shared default instance recorded in the schema.
  bool IsDefaultInstance(const Message& message) const {
    return &message == default_instance_;
  }

  // Returns a pointer to the default value for this field.  The size and type
  // of the underlying data depends on the field's type.
  const void* GetFieldDefault(const FieldDescriptor* field) const {
    return reinterpret_cast<const uint8_t*>(default_instance_) +
           OffsetValue(offsets_[field->index()], field->type());
  }

  // Returns true if the field is implicitly backed by LazyField.
  // NOTE: in this build this is hard-wired to false; the field argument is
  // only DCHECK-validated.
  bool IsEagerlyVerifiedLazyField(const FieldDescriptor* field) const {
    ABSL_DCHECK_EQ(field->type(), FieldDescriptor::TYPE_MESSAGE);
    (void)field;
    return false;
  }

  // Whether this message type has a split (_split_) section at all.
  bool IsSplit() const { return split_offset_ != -1; }

  // Whether this particular field lives in the split section (its offset
  // entry carries the kSplitFieldOffsetMask tag bit).
  bool IsSplit(const FieldDescriptor* field) const {
    return split_offset_ != -1 &&
           (offsets_[field->index()] & kSplitFieldOffsetMask) != 0;
  }

  // Byte offset of _split_.
  uint32_t SplitOffset() const {
    ABSL_DCHECK(IsSplit());
    return static_cast<uint32_t>(split_offset_);
  }

  // sizeof() of the split struct.
  uint32_t SizeofSplit() const {
    ABSL_DCHECK(IsSplit());
    return static_cast<uint32_t>(sizeof_split_);
  }


  // Weak fields use a positive weak_field_map_offset_; 0/negative means none.
  bool HasWeakFields() const { return weak_field_map_offset_ > 0; }

  // These members are intended to be private, but we cannot actually make them
  // private because this prevents us from using aggregate initialization of
  // them, ie.
  //
  //   ReflectionSchema schema = {a, b, c, d, e, ...};
  //
  // Generated code aggregate-initializes this struct, so the declaration
  // order below is part of the ABI with generated code — do not reorder.
  // private:
  const Message* default_instance_;
  const uint32_t* offsets_;
  const uint32_t* has_bit_indices_;
  int has_bits_offset_;
  int metadata_offset_;
  int extensions_offset_;
  int oneof_case_offset_;
  int object_size_;
  int weak_field_map_offset_;
  const uint32_t* inlined_string_indices_;
  int inlined_string_donated_offset_;
  int split_offset_;
  int sizeof_split_;

  // We tag offset values to provide additional data about fields (such as
  // "unused" or "lazy" or "inlined").  This strips the tag bits to recover
  // the raw byte offset: message/string/bytes entries may carry the split,
  // inlined, and lazy bits; all other types only carry the split bit.
  static uint32_t OffsetValue(uint32_t v, FieldDescriptor::Type type) {
    if (type == FieldDescriptor::TYPE_MESSAGE ||
        type == FieldDescriptor::TYPE_STRING ||
        type == FieldDescriptor::TYPE_BYTES) {
      return v & (~kSplitFieldOffsetMask) & (~kInlinedMask) & (~kLazyMask);
    }
    return v & (~kSplitFieldOffsetMask);
  }

  // Whether the kInlinedMask tag bit is set.  Only string/bytes fields can be
  // inlined; for every other type the tag bit has a different meaning.
  static bool Inlined(uint32_t v, FieldDescriptor::Type type) {
    if (type == FieldDescriptor::TYPE_STRING ||
        type == FieldDescriptor::TYPE_BYTES) {
      return (v & kInlinedMask) != 0u;
    } else {
      // Non string/byte fields are not inlined.
      return false;
    }
  }
};
0282 
0283 // Structs that the code generator emits directly to describe a message.
0284 // These should never be used directly except to build a ReflectionSchema
0285 // object.
0286 //
0287 // EXPERIMENTAL: these are changing rapidly, and may completely disappear
0288 // or merge with ReflectionSchema.
struct MigrationSchema {
  // NOTE(review): the *_index members appear to be start indices into the
  // shared arrays carried by DescriptorTable (`offsets`, etc.) — confirm
  // against the code that builds ReflectionSchema from this struct.
  int32_t offsets_index;
  int32_t has_bit_indices_index;
  int32_t inlined_string_indices_index;
  int object_size;  // sizeof() the generated message class.
};
0295 
0296 // This struct tries to reduce unnecessary padding.
0297 // The num_xxx might not be close to their respective pointer, but this saves
0298 // padding.
// This struct tries to reduce unnecessary padding.
// The num_xxx might not be close to their respective pointer, but this saves
// padding.  Member order is therefore deliberate — do not reorder.
struct PROTOBUF_EXPORT DescriptorTable {
  mutable bool is_initialized;
  bool is_eager;
  int size;  // of serialized descriptor
  const char* descriptor;  // the serialized descriptor bytes (see `size`)
  const char* filename;
  absl::once_flag* once;  // guards one-time descriptor assignment
  const DescriptorTable* const* deps;  // tables of dependency files
  int num_deps;
  int num_messages;
  const MigrationSchema* schemas;  // one per message (see MigrationSchema)
  const Message* const* default_instances;
  const uint32_t* offsets;
  // AssignDescriptors() populates the arrays below with pointers to the
  // file-level descriptor objects.
  const EnumDescriptor** file_level_enum_descriptors;
  const ServiceDescriptor** file_level_service_descriptors;
};
0316 
0317 // AssignDescriptors() pulls the compiled FileDescriptor from the DescriptorPool
0318 // and uses it to populate all of the global variables which store pointers to
0319 // the descriptor objects.  It also constructs the reflection objects.  It is
0320 // called the first time anyone calls descriptor() or GetReflection() on one of
0321 // the types defined in the file.  AssignDescriptors() is thread-safe.
0322 void PROTOBUF_EXPORT AssignDescriptors(const DescriptorTable* table);
0323 // As above, but the caller did the call_once call already.
0324 void PROTOBUF_EXPORT
0325 AssignDescriptorsOnceInnerCall(const DescriptorTable* table);
0326 
0327 // These cannot be in lite so we put them in the reflection.
0328 PROTOBUF_EXPORT void UnknownFieldSetSerializer(const uint8_t* base,
0329                                                uint32_t offset, uint32_t tag,
0330                                                uint32_t has_offset,
0331                                                io::CodedOutputStream* output);
0332 
0333 PROTOBUF_EXPORT void InitializeFileDescriptorDefaultInstances();
0334 
0335 PROTOBUF_EXPORT void AddDescriptors(const DescriptorTable* table);
0336 
// Helper whose constructor registers `table`; typically instantiated as a
// global so registration happens during dynamic initialization.  (The
// constructor body is defined elsewhere; presumably it calls
// AddDescriptors() — verify in the .cc file.)
struct PROTOBUF_EXPORT AddDescriptorsRunner {
  explicit AddDescriptorsRunner(const DescriptorTable* table);
};
0340 
0341 // Retrieves the existing prototype out of a descriptor table.
0342 // If it doesn't exist:
0343 //  - If force_build is true, asks the generated message factory for one.
0344 //  - Otherwise, return null
0345 const Message* GetPrototypeForWeakDescriptor(const DescriptorTable* table,
0346                                              int index, bool force_build);
0347 
// Cache state for NameOfDenseEnum / NameOfDenseEnumSlow.  `cache` is an
// atomically published array of pointers to enum-name strings, indexed by
// (value - min_val); it is null until the slow path populates it.
// Aggregate-initialized in NameOfDenseEnum — do not reorder members.
struct DenseEnumCacheInfo {
  std::atomic<const std::string**> cache;
  int min_val;
  int max_val;
  const EnumDescriptor* (*descriptor_fn)();  // yields the enum's descriptor
};
0354 PROTOBUF_EXPORT const std::string& NameOfDenseEnumSlow(int v,
0355                                                        DenseEnumCacheInfo*);
0356 
0357 // Similar to the routine NameOfEnum, this routine returns the name of an enum.
0358 // Unlike that routine, it allocates, on-demand, a block of pointers to the
0359 // std::string objects allocated by reflection to store the enum names. This
0360 // way, as long as the enum values are fairly dense, looking them up can be
0361 // very fast. This assumes all the enums fall in the range [min_val .. max_val].
0362 template <const EnumDescriptor* (*descriptor_fn)(), int min_val, int max_val>
0363 const std::string& NameOfDenseEnum(int v) {
0364   static_assert(max_val - min_val >= 0, "Too many enums between min and max.");
0365   static DenseEnumCacheInfo deci = {/* atomic ptr */ {}, min_val, max_val,
0366                                     descriptor_fn};
0367   const std::string** cache = deci.cache.load(std::memory_order_acquire );
0368   if (PROTOBUF_PREDICT_TRUE(cache != nullptr)) {
0369     if (PROTOBUF_PREDICT_TRUE(v >= min_val && v <= max_val)) {
0370       return *cache[v - min_val];
0371     }
0372   }
0373   return NameOfDenseEnumSlow(v, &deci);
0374 }
0375 
0376 // Returns whether this type of field is stored in the split struct as a raw
0377 // pointer.
0378 PROTOBUF_EXPORT bool SplitFieldHasExtraIndirection(
0379     const FieldDescriptor* field);
0380 
0381 }  // namespace internal
0382 }  // namespace protobuf
0383 }  // namespace google
0384 
0385 #include "google/protobuf/port_undef.inc"
0386 
0387 #endif  // GOOGLE_PROTOBUF_GENERATED_MESSAGE_REFLECTION_H__