// Protocol Buffers - Google's data interchange format
// Copyright 2022 Google Inc.  All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
//
// This file defines the internal class ThreadSafeArena

#ifndef GOOGLE_PROTOBUF_THREAD_SAFE_ARENA_H__
#define GOOGLE_PROTOBUF_THREAD_SAFE_ARENA_H__

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <vector>

#include "absl/base/attributes.h"
#include "absl/synchronization/mutex.h"
#include "google/protobuf/arena_align.h"
#include "google/protobuf/arena_allocation_policy.h"
#include "google/protobuf/arena_cleanup.h"
#include "google/protobuf/arenaz_sampler.h"
#include "google/protobuf/port.h"
#include "google/protobuf/serial_arena.h"

// Must be included last.
#include "google/protobuf/port_def.inc"

namespace google {
namespace protobuf {
namespace internal {

// This class provides the core Arena memory allocation library. Different
// implementations only need to implement the public interface below.
// Arena is not a template type, as that would only be useful if all protos
// were templates in turn, which will not (and cannot) happen. However, by
// separating the memory allocation part from the cruft of the API users
// expect, we can use #ifdef to select the best implementation based on
// hardware / OS.
class PROTOBUF_EXPORT ThreadSafeArena {
 public:
  ThreadSafeArena();

  ThreadSafeArena(char* mem, size_t size);

  explicit ThreadSafeArena(void* mem, size_t size,
                           const AllocationPolicy& policy);

  // All protos have pointers back to the arena, hence Arena must have
  // pointer stability.
  ThreadSafeArena(const ThreadSafeArena&) = delete;
  ThreadSafeArena& operator=(const ThreadSafeArena&) = delete;
  ThreadSafeArena(ThreadSafeArena&&) = delete;
  ThreadSafeArena& operator=(ThreadSafeArena&&) = delete;

  // Destructor deletes all owned heap allocated objects, and destructs objects
  // that have non-trivial destructors, except for proto2 message objects whose
  // destructors can be skipped. Also, frees all blocks except the initial block
  // if it was passed in.
  ~ThreadSafeArena();

  uint64_t Reset();

  uint64_t SpaceAllocated() const;
  uint64_t SpaceUsed() const;

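  // Allocates n bytes (at the arena's default alignment). Fast path: reuse the
  // SerialArena cached for this thread (see GetSerialArenaFast); otherwise
  // fall back to AllocateAlignedFallback, which finds or creates this thread's
  // SerialArena.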
  template <AllocationClient alloc_client = AllocationClient::kDefault>
  void* AllocateAligned(size_t n) {
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
      return arena->AllocateAligned<alloc_client>(n);
    } else {
      return AllocateAlignedFallback<alloc_client>(n);
    }
  }

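  // Hands `size` bytes at `p` back to this thread's SerialArena so they can be
  // reused by later array allocations. Best effort: if this thread has no
  // cached SerialArena for this arena, the memory is simply not recycled.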
  void ReturnArrayMemory(void* p, size_t size) {
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
      arena->ReturnArrayMemory(p, size);
    }
  }

  // Allocates n bytes and returns true if the common happy case applies.
  // Otherwise does nothing and returns false. These unusual semantics let
  // callers write functions whose only fallback calls are in tail position,
  // which substantially improves the generated code for the happy path.
  PROTOBUF_NDEBUG_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
      return arena->MaybeAllocateAligned(n, out);
    }
    return false;
  }
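
  // A minimal sketch (not part of this header) of the tail-call pattern the
  // comment above describes; `AllocateSlow` is a hypothetical fallback helper.
  //
  //   void* AllocateFast(ThreadSafeArena& arena, size_t n) {
  //     void* ptr;
  //     if (arena.MaybeAllocateAligned(n, &ptr)) return ptr;
  //     return AllocateSlow(arena, n);  // only fallback call, in tail position
  //   }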

  void* AllocateAlignedWithCleanup(size_t n, size_t align,
                                   void (*destructor)(void*));

  // Add object pointer and cleanup function pointer to the list.
  void AddCleanup(void* elem, void (*cleanup)(void*));

  void* AllocateFromStringBlock();

  std::vector<void*> PeekCleanupListForTesting();

 private:
  friend class ArenaBenchmark;
  friend class TcParser;
  friend class SerialArena;
  friend struct SerialArenaChunkHeader;
  friend class cleanup::ChunkList;
  static uint64_t GetNextLifeCycleId();

  class SerialArenaChunk;

  // Returns a new SerialArenaChunk that has {id, serial} at slot 0. It may
  // grow based on "prev_capacity".
  static SerialArenaChunk* NewSerialArenaChunk(uint32_t prev_capacity, void* id,
                                               SerialArena* serial);
  static SerialArenaChunk* SentrySerialArenaChunk();

  // Returns the first ArenaBlock* for the first SerialArena. If users provide
  // one, use it if it's acceptable. Otherwise returns a sentry block.
  ArenaBlock* FirstBlock(void* buf, size_t size);
  // Same as the above but returns a valid block if "policy" is not default.
  ArenaBlock* FirstBlock(void* buf, size_t size,
                         const AllocationPolicy& policy);

  // Adds SerialArena to the chunked list. May create a new chunk.
  void AddSerialArena(void* id, SerialArena* serial);

  void UnpoisonAllArenaBlocks() const;

  // Members are declared here to track sizeof(ThreadSafeArena) and hotness
  // centrally.

  // Unique for each arena. Changes on Reset().
  uint64_t tag_and_id_ = 0;

  TaggedAllocationPolicyPtr alloc_policy_;  // Tagged pointer to AllocPolicy.
  ThreadSafeArenaStatsHandle arena_stats_;

  // Adding a new chunk to head_ must be protected by mutex_.
  absl::Mutex mutex_;
  // Pointer to a linked list of SerialArenaChunk.
  std::atomic<SerialArenaChunk*> head_{nullptr};

  void* first_owner_;
  // Must be declared after alloc_policy_; otherwise, it may lose info on
  // user-provided initial block.
  SerialArena first_arena_;

  static_assert(std::is_trivially_destructible<SerialArena>{},
                "SerialArena needs to be trivially destructible.");

  const AllocationPolicy* AllocPolicy() const { return alloc_policy_.get(); }
  void InitializeWithPolicy(const AllocationPolicy& policy);
  void* AllocateAlignedWithCleanupFallback(size_t n, size_t align,
                                           void (*destructor)(void*));

  void Init();

  // Deletes or destroys all objects owned by the arena.
  void CleanupList();

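  // Records `serial` as the SerialArena this thread last used for this arena
  // (keyed by tag_and_id_), so the next allocation from this thread can take
  // the GetSerialArenaFast path.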
  inline void CacheSerialArena(SerialArena* serial) {
    thread_cache().last_serial_arena = serial;
    thread_cache().last_lifecycle_id_seen = tag_and_id_;
  }

  PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFast(SerialArena** arena) {
    // If this thread already owns a block in this arena then try to use that.
    // This fast path optimizes the case where multiple threads allocate from
    // the same arena.
    ThreadCache* tc = &thread_cache();
    if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == tag_and_id_)) {
      *arena = tc->last_serial_arena;
      return true;
    }
    return false;
  }

  // Finds the SerialArena for this thread, or creates one if not found. When
  // creating a new one, allocates a block big enough to accommodate n bytes.
  SerialArena* GetSerialArenaFallback(size_t n);

  SerialArena* GetSerialArena();

  template <AllocationClient alloc_client = AllocationClient::kDefault>
  void* AllocateAlignedFallback(size_t n);

  // Executes callback function over SerialArenaChunk. Passes const
  // SerialArenaChunk*.
  template <typename Callback>
  void WalkConstSerialArenaChunk(Callback fn) const;

  // Executes callback function over SerialArenaChunk.
  template <typename Callback>
  void WalkSerialArenaChunk(Callback fn);

  // Visits SerialArena and calls "fn", including "first_arena" and ones on
  // chunks. Do not rely on the order of visit. The callback function should
  // accept `const SerialArena*`.
  template <typename Callback>
  void VisitSerialArena(Callback fn) const;

  // Releases all memory except the first block which it returns. The first
  // block might be owned by the user and thus need some extra checks before
  // deleting.
  SizedPtr Free();

  // ThreadCache is accessed very frequently, so we align it such that it's
  // located within a single cache line.
  static constexpr size_t kThreadCacheAlignment = 32;

#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
  struct alignas(kThreadCacheAlignment) ThreadCache {
    // Number of per-thread lifecycle IDs to reserve. Must be a power of two.
    // To reduce contention on a global atomic, each thread reserves a batch of
    // IDs. The following number is calculated based on a stress test with
    // ~6500 threads all frequently allocating a new arena.
    static constexpr size_t kPerThreadIds = 256;
    // Next lifecycle ID available to this thread. We need to reserve a new
    // batch if `next_lifecycle_id & (kPerThreadIds - 1) == 0`.
    uint64_t next_lifecycle_id{0};
    // The ThreadCache is considered valid as long as this matches the
    // lifecycle_id of the arena being used.
    uint64_t last_lifecycle_id_seen{static_cast<uint64_t>(-1)};
    SerialArena* last_serial_arena{nullptr};
  };
  static_assert(sizeof(ThreadCache) <= kThreadCacheAlignment,
                "ThreadCache may span several cache lines");
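
  // A rough sketch (for illustration only; the real logic lives in
  // GetNextLifeCycleId() in the .cc file) of the batched reservation described
  // above, assuming a relaxed fetch_add on the shared counter:
  //
  //   ThreadCache& tc = thread_cache();
  //   uint64_t id = tc.next_lifecycle_id;
  //   if ((id & (ThreadCache::kPerThreadIds - 1)) == 0) {
  //     // Out of locally reserved IDs: grab a fresh batch from the global
  //     // atomic so the next kPerThreadIds requests stay thread-local.
  //     id = lifecycle_id_.fetch_add(ThreadCache::kPerThreadIds,
  //                                  std::memory_order_relaxed);
  //   }
  //   tc.next_lifecycle_id = id + 1;
  //   return id;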

  // lifecycle_id_ can be a highly contended variable when many arenas are
  // created, so make sure that other global variables do not share its cache
  // line.
#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
  using LifecycleId = uint64_t;
  alignas(kCacheAlignment) ABSL_CONST_INIT
      static std::atomic<LifecycleId> lifecycle_id_;
#if defined(PROTOBUF_NO_THREADLOCAL)
  // iOS does not support the __thread keyword, so we use a custom thread-local
  // storage class we implemented.
  static ThreadCache& thread_cache();
#elif defined(PROTOBUF_USE_DLLS) && defined(_WIN32)
  // Thread-local variables cannot be exposed through the MSVC DLL interface,
  // but we can wrap them in static functions.
  static ThreadCache& thread_cache();
#else
  PROTOBUF_CONSTINIT static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_;
  static ThreadCache& thread_cache() { return thread_cache_; }
#endif

 public:
  // kBlockHeaderSize is sizeof(ArenaBlock), aligned up to the nearest multiple
  // of 8 to protect the invariant that pos is always at a multiple of 8.
  static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize;
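  // sizeof(SerialArena) rounded up to the next multiple of 8: adding 7 and
  // masking with static_cast<size_t>(-8) (i.e. ~size_t{7}) clears the low
  // three bits.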
  static constexpr size_t kSerialArenaSize =
      (sizeof(SerialArena) + 7) & static_cast<size_t>(-8);
  static constexpr size_t kAllocPolicySize =
      ArenaAlignDefault::Ceil(sizeof(AllocationPolicy));
  static constexpr size_t kMaxCleanupNodeSize = 16;
  static_assert(kBlockHeaderSize % 8 == 0,
                "kBlockHeaderSize must be a multiple of 8.");
  static_assert(kSerialArenaSize % 8 == 0,
                "kSerialArenaSize must be a multiple of 8.");
};

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include "google/protobuf/port_undef.inc"

#endif  // GOOGLE_PROTOBUF_THREAD_SAFE_ARENA_H__