// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

#ifndef GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__
#define GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__

#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <utility>


// Must be included last.
#include "google/protobuf/port_def.inc"

namespace google {
namespace protobuf {
namespace internal {

#if defined(PROTOBUF_ARENAZ_SAMPLE)
struct ThreadSafeArenaStats;
void RecordAllocateSlow(ThreadSafeArenaStats* info, size_t used,
                        size_t allocated, size_t wasted);
// Stores information about a sampled thread safe arena.  All mutations to this
// *must* be made through `Record*` functions below.  All reads from this *must*
// only occur in the callback to `ThreadSafeArenazSampler::Iterate`.
struct ThreadSafeArenaStats
    : public absl::profiling_internal::Sample<ThreadSafeArenaStats> {
  // Constructs the object but does not fill in any fields.
  ThreadSafeArenaStats();
  ~ThreadSafeArenaStats();

  // Puts the object into a clean state, fills in the logically `const` members,
  // blocking for any readers that are currently sampling the object.  The
  // 'stride' parameter is the number of ThreadSafeArenas that were instantiated
  // between this sample and the previous one.
  void PrepareForSampling(int64_t stride)
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);

  // These fields are mutated by the various Record* APIs and need to be
  // thread-safe.
  struct BlockStats {
    std::atomic<int> num_allocations;
    std::atomic<size_t> bytes_allocated;
    std::atomic<size_t> bytes_used;
    std::atomic<size_t> bytes_wasted;

    void PrepareForSampling();
  };

  // block_histogram is a kBlockHistogramBins sized histogram.  The zeroth bin
  // stores info about blocks of size \in [1, 1 << kLogMaxSizeForBinZero]. Bin
  // i, where i > 0, stores info for blocks of size \in (max_size_bin (i-1),
  // 1 << (kLogMaxSizeForBinZero + i)].  The final bin stores info about blocks
  // of size \in [kMaxSizeForPenultimateBin + 1,
  // std::numeric_limits<size_t>::max()].
  static constexpr size_t kBlockHistogramBins = 15;
  static constexpr size_t kLogMaxSizeForBinZero = 7;
  static constexpr size_t kMaxSizeForBinZero = (1 << kLogMaxSizeForBinZero);
  static constexpr size_t kMaxSizeForPenultimateBin =
      1 << (kLogMaxSizeForBinZero + kBlockHistogramBins - 2);
  std::array<BlockStats, kBlockHistogramBins> block_histogram;
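
  // Illustration of the layout above, derived from the constants (a worked
  // example, not an exhaustive list of bins):
  //   bin 0  : [1, 128]                 // up to kMaxSizeForBinZero
  //   bin 1  : (128, 256]
  //   bin 5  : (2048, 4096]             // i.e. (1 << 11, 1 << 12]
  //   bin 13 : ((1 << 19), 1 << 20]     // up to kMaxSizeForPenultimateBin
  //   bin 14 : ((1 << 20), SIZE_MAX]    // final catch-all bin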

  // Records the largest block allocated for the arena.
  std::atomic<size_t> max_block_size;
  // Bit `i` is set to 1 when a thread with `tid % 63 = i` has accessed the
  // underlying arena.  We use `% 63` as a rudimentary hash to ensure some bit
  // mixing for thread ids; `% 64` would only grab the low bits and might
  // create sampling artifacts.
  std::atomic<uint64_t> thread_ids;
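  // For example, an access by a thread whose tid is 200 sets bit
  // 200 % 63 == 11 in this mask.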

  // All of the fields below are set by `PrepareForSampling`; they must not
  // be mutated in `Record*` functions.  They are logically `const` in that
  // sense.  These are guarded by init_mu, but that is not externalized to
  // clients, who can only read them during
  // `ThreadSafeArenazSampler::Iterate`, which will hold the lock.
  static constexpr int kMaxStackDepth = 64;
  int32_t depth;
  void* stack[kMaxStackDepth];
  static void RecordAllocateStats(ThreadSafeArenaStats* info, size_t used,
                                  size_t allocated, size_t wasted) {
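    // `info` is null for arenas that were not sampled; the PROTOBUF_PREDICT_TRUE
    // annotation below marks that as the expected common case, so only sampled
    // arenas pay for the out-of-line RecordAllocateSlow call.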
    if (PROTOBUF_PREDICT_TRUE(info == nullptr)) return;
    RecordAllocateSlow(info, used, allocated, wasted);
  }

  // Returns the bin for the provided size.
  static size_t FindBin(size_t bytes);

  // Returns the min and max bytes that can be stored in the histogram for
  // blocks in the provided bin.
  static std::pair<size_t, size_t> MinMaxBlockSizeForBin(size_t bin);
};
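
// Illustrative results for the helpers above, assuming FindBin and
// MinMaxBlockSizeForBin follow the histogram layout documented in
// ThreadSafeArenaStats:
//   FindBin(100)  == 0    // block sizes in [1, 128] land in bin 0
//   FindBin(129)  == 1    // (128, 256]
//   FindBin(4096) == 5    // (2048, 4096]
//   MinMaxBlockSizeForBin(0)  == {1, 128}
//   MinMaxBlockSizeForBin(14) == {(1 << 20) + 1, SIZE_MAX}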

struct SamplingState {
  // Number of ThreadSafeArenas that should be instantiated before the next
  // ThreadSafeArena is sampled.  This variable is decremented with each
  // instantiation.
  int64_t next_sample;
  // When we make a sampling decision, we record the distance from the previous
  // sample so that we can weight each sample.  'distance' here is the number
  // of instantiations of ThreadSafeArena.
  int64_t sample_stride;
};
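
// For example, if `sample_stride` is 1000 for a given sample, 1000
// ThreadSafeArenas were instantiated between it and the previous sample, so a
// reader can weight that sample by 1000 when estimating totals across all
// arenas (one way to apply the weighting described above).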

ThreadSafeArenaStats* SampleSlow(SamplingState& sampling_state);
void UnsampleSlow(ThreadSafeArenaStats* info);

class ThreadSafeArenaStatsHandle {
 public:
  explicit ThreadSafeArenaStatsHandle() = default;
  explicit ThreadSafeArenaStatsHandle(ThreadSafeArenaStats* info)
      : info_(info) {}

  ~ThreadSafeArenaStatsHandle() {
    if (PROTOBUF_PREDICT_TRUE(info_ == nullptr)) return;
    UnsampleSlow(info_);
  }

  ThreadSafeArenaStatsHandle(ThreadSafeArenaStatsHandle&& other) noexcept
      : info_(std::exchange(other.info_, nullptr)) {}

  ThreadSafeArenaStatsHandle& operator=(
      ThreadSafeArenaStatsHandle&& other) noexcept {
    if (PROTOBUF_PREDICT_FALSE(info_ != nullptr)) {
      UnsampleSlow(info_);
    }
    info_ = std::exchange(other.info_, nullptr);
    return *this;
  }

  ThreadSafeArenaStats* MutableStats() { return info_; }

  friend void swap(ThreadSafeArenaStatsHandle& lhs,
                   ThreadSafeArenaStatsHandle& rhs) {
    std::swap(lhs.info_, rhs.info_);
  }

  friend class ThreadSafeArenaStatsHandlePeer;

 private:
  ThreadSafeArenaStats* info_ = nullptr;
};
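
// Ownership sketch for the handle above (`info` stands for a hypothetical
// ThreadSafeArenaStats* obtained from SampleSlow):
//   ThreadSafeArenaStatsHandle h1(info);
//   ThreadSafeArenaStatsHandle h2 = std::move(h1);  // h1 now holds nullptr
// When h2 is destroyed it calls UnsampleSlow(info); h1's destructor is a
// no-op, so the sample is unregistered exactly once.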

using ThreadSafeArenazSampler =
    ::absl::profiling_internal::SampleRecorder<ThreadSafeArenaStats>;

extern PROTOBUF_THREAD_LOCAL SamplingState global_sampling_state;

// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
inline ThreadSafeArenaStatsHandle Sample() {
  if (PROTOBUF_PREDICT_TRUE(--global_sampling_state.next_sample > 0)) {
    return ThreadSafeArenaStatsHandle(nullptr);
  }
  return ThreadSafeArenaStatsHandle(SampleSlow(global_sampling_state));
}
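
// A simplified sketch of how an arena-like class might use Sample() and
// RecordAllocateStats; `MyArena` and its members are hypothetical, the real
// integration lives in protobuf's ThreadSafeArena implementation:
//   class MyArena {
//    public:
//     MyArena() : stats_handle_(Sample()) {}
//     void OnBlockAllocated(size_t used, size_t allocated, size_t wasted) {
//       ThreadSafeArenaStats::RecordAllocateStats(
//           stats_handle_.MutableStats(), used, allocated, wasted);
//     }
//    private:
//     ThreadSafeArenaStatsHandle stats_handle_;  // unregisters on destruction
//   };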

#else

using SamplingState = int64_t;

struct ThreadSafeArenaStats {
  static void RecordAllocateStats(ThreadSafeArenaStats*, size_t /*requested*/,
                                  size_t /*allocated*/, size_t /*wasted*/) {}
};

ThreadSafeArenaStats* SampleSlow(SamplingState& next_sample);
void UnsampleSlow(ThreadSafeArenaStats* info);

class ThreadSafeArenaStatsHandle {
 public:
  explicit ThreadSafeArenaStatsHandle() = default;
  explicit ThreadSafeArenaStatsHandle(ThreadSafeArenaStats*) {}

  void RecordReset() {}

  ThreadSafeArenaStats* MutableStats() { return nullptr; }

  friend void swap(ThreadSafeArenaStatsHandle&, ThreadSafeArenaStatsHandle&) {}

 private:
  friend class ThreadSafeArenaStatsHandlePeer;
};

class ThreadSafeArenazSampler {
 public:
  void Unregister(ThreadSafeArenaStats*) {}
  void SetMaxSamples(int32_t) {}
};

// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
inline ThreadSafeArenaStatsHandle Sample() {
  return ThreadSafeArenaStatsHandle(nullptr);
}
#endif  // defined(PROTOBUF_ARENAZ_SAMPLE)

// Returns a global Sampler.
ThreadSafeArenazSampler& GlobalThreadSafeArenazSampler();

using ThreadSafeArenazConfigListener = void (*)();
void SetThreadSafeArenazConfigListener(ThreadSafeArenazConfigListener l);

// Enables or disables sampling for thread safe arenas.
void SetThreadSafeArenazEnabled(bool enabled);
void SetThreadSafeArenazEnabledInternal(bool enabled);

// Returns true if sampling is on, false otherwise.
bool IsThreadSafeArenazEnabled();

// Sets the rate at which thread safe arenas will be sampled.
void SetThreadSafeArenazSampleParameter(int32_t rate);
void SetThreadSafeArenazSampleParameterInternal(int32_t rate);

// Returns the rate at which thread safe arenas will be sampled.
int32_t ThreadSafeArenazSampleParameter();

// Sets a soft max for the number of samples that will be kept.
void SetThreadSafeArenazMaxSamples(int32_t max);
void SetThreadSafeArenazMaxSamplesInternal(int32_t max);

// Returns the max number of samples that will be kept.
size_t ThreadSafeArenazMaxSamples();

// Sets the current value for when arenas should be next sampled.
void SetThreadSafeArenazGlobalNextSample(int64_t next_sample);
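
// A configuration sketch using the declarations above (illustrative only;
// `DumpArenaSamples` is a hypothetical caller, and iterating over samples via
// ThreadSafeArenazSampler::Iterate is only meaningful in builds where
// PROTOBUF_ARENAZ_SAMPLE is defined):
//   void DumpArenaSamples() {
//     SetThreadSafeArenazEnabled(true);
//     SetThreadSafeArenazSampleParameter(1 << 10);  // sampling rate
//     SetThreadSafeArenazMaxSamples(512);           // soft cap on kept samples
//     GlobalThreadSafeArenazSampler().Iterate(
//         [](const ThreadSafeArenaStats& stats) {
//           // Reads of sampled stats are only safe inside this callback.
//         });
//   }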

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include "google/protobuf/port_undef.inc"
#endif  // GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__