0001 // Protocol Buffers - Google's data interchange format
0002 // Copyright 2008 Google Inc.  All rights reserved.
0003 //
0004 // Use of this source code is governed by a BSD-style
0005 // license that can be found in the LICENSE file or at
0006 // https://developers.google.com/open-source/licenses/bsd
0007 
0008 // This file defines the map container and its helpers to support protobuf maps.
0009 //
0010 // The Map and MapIterator types are provided by this header file.
0011 // Please avoid using other types defined here, unless they are public
0012 // types within Map or MapIterator, such as Map::value_type.
0013 
0014 #ifndef GOOGLE_PROTOBUF_MAP_H__
0015 #define GOOGLE_PROTOBUF_MAP_H__
0016 
0017 #include <algorithm>
0018 #include <cstddef>
0019 #include <cstdint>
0020 #include <functional>
0021 #include <initializer_list>
0022 #include <iterator>
0023 #include <limits>  // To support Visual Studio 2008
0024 #include <string>
0025 #include <type_traits>
0026 #include <utility>
0027 
0028 #if !defined(GOOGLE_PROTOBUF_NO_RDTSC) && defined(__APPLE__)
0029 #include <time.h>
0030 #endif
0031 
0032 #include "google/protobuf/stubs/common.h"
0033 #include "absl/base/attributes.h"
0034 #include "absl/container/btree_map.h"
0035 #include "absl/hash/hash.h"
0036 #include "absl/log/absl_check.h"
0037 #include "absl/meta/type_traits.h"
0038 #include "absl/strings/string_view.h"
0039 #include "google/protobuf/arena.h"
0040 #include "google/protobuf/generated_enum_util.h"
0041 #include "google/protobuf/internal_visibility.h"
0042 #include "google/protobuf/map_type_handler.h"
0043 #include "google/protobuf/port.h"
0044 #include "google/protobuf/wire_format_lite.h"
0045 
0046 
0047 #ifdef SWIG
0048 #error "You cannot SWIG proto headers"
0049 #endif
0050 
0051 // Must be included last.
0052 #include "google/protobuf/port_def.inc"
0053 
0054 namespace google {
0055 namespace protobuf {
0056 
0057 template <typename Key, typename T>
0058 class Map;
0059 
0060 class MapIterator;
0061 
0062 template <typename Enum>
0063 struct is_proto_enum;
0064 
0065 namespace internal {
0066 template <typename Key, typename T>
0067 class MapFieldLite;
0068 
0069 template <typename Derived, typename Key, typename T,
0070           WireFormatLite::FieldType key_wire_type,
0071           WireFormatLite::FieldType value_wire_type>
0072 class MapField;
0073 
0074 struct MapTestPeer;
0075 struct MapBenchmarkPeer;
0076 
0077 template <typename Key, typename T>
0078 class TypeDefinedMapFieldBase;
0079 
0080 class DynamicMapField;
0081 
0082 class GeneratedMessageReflection;
0083 
0084 // The largest valid serialization for a message is INT_MAX, so we can't have
0085 // more than 32-bits worth of elements.
0086 using map_index_t = uint32_t;
0087 
0088 // Internal type traits that can be used to define custom key/value types. These
0089 // are only specialized by protobuf internals, and never by users.
0090 template <typename T, typename VoidT = void>
0091 struct is_internal_map_key_type : std::false_type {};
0092 
0093 template <typename T, typename VoidT = void>
0094 struct is_internal_map_value_type : std::false_type {};
0095 
0096 // Re-implements std::allocator to use the arena allocator for memory
0097 // allocation. Used by the Map implementation. Users should not use this class
0098 // directly.
0099 template <typename U>
0100 class MapAllocator {
0101  public:
0102   using value_type = U;
0103   using pointer = value_type*;
0104   using const_pointer = const value_type*;
0105   using reference = value_type&;
0106   using const_reference = const value_type&;
0107   using size_type = size_t;
0108   using difference_type = ptrdiff_t;
0109 
0110   constexpr MapAllocator() : arena_(nullptr) {}
0111   explicit constexpr MapAllocator(Arena* arena) : arena_(arena) {}
0112   template <typename X>
0113   MapAllocator(const MapAllocator<X>& allocator)  // NOLINT(runtime/explicit)
0114       : arena_(allocator.arena()) {}
0115 
0116   // MapAllocator does not support alignments beyond 8. Technically we should
0117   // support up to std::max_align_t, but this fails with ubsan and the tcmalloc
0118   // debug allocation logic, which assume 8 as the default alignment.
0119   static_assert(alignof(value_type) <= 8, "");
0120 
0121   pointer allocate(size_type n, const void* /* hint */ = nullptr) {
0122     // If no arena is given, use ::operator new, which allocates raw memory
0123     // without constructing the element objects.
0124     if (arena_ == nullptr) {
0125       return static_cast<pointer>(::operator new(n * sizeof(value_type)));
0126     } else {
0127       return reinterpret_cast<pointer>(
0128           Arena::CreateArray<uint8_t>(arena_, n * sizeof(value_type)));
0129     }
0130   }
0131 
0132   void deallocate(pointer p, size_type n) {
0133     if (arena_ == nullptr) {
0134       internal::SizedDelete(p, n * sizeof(value_type));
0135     }
0136   }
0137 
0138 #if !defined(GOOGLE_PROTOBUF_OS_APPLE) && !defined(GOOGLE_PROTOBUF_OS_NACL) && \
0139     !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
0140   template <class NodeType, class... Args>
0141   void construct(NodeType* p, Args&&... args) {
0142     // Clang 3.6 doesn't compile a static_cast to void* directly (issue
0143     // #1266). According to C++ standard 5.2.9/1: "The static_cast operator
0144     // shall not cast away constness". So the possibly-const pointer is first
0145     // cast to const void*, and then the const is cast away.
0146     new (const_cast<void*>(static_cast<const void*>(p)))
0147         NodeType(std::forward<Args>(args)...);
0148   }
0149 
0150   template <class NodeType>
0151   void destroy(NodeType* p) {
0152     p->~NodeType();
0153   }
0154 #else
0155   void construct(pointer p, const_reference t) { new (p) value_type(t); }
0156 
0157   void destroy(pointer p) { p->~value_type(); }
0158 #endif
0159 
0160   template <typename X>
0161   struct rebind {
0162     using other = MapAllocator<X>;
0163   };
0164 
0165   template <typename X>
0166   bool operator==(const MapAllocator<X>& other) const {
0167     return arena_ == other.arena_;
0168   }
0169 
0170   template <typename X>
0171   bool operator!=(const MapAllocator<X>& other) const {
0172     return arena_ != other.arena_;
0173   }
0174 
0175   // To support Visual Studio 2008
0176   size_type max_size() const {
0177     // parentheses around (std::...:max) prevents macro warning of max()
0178     return (std::numeric_limits<size_type>::max)();
0179   }
0180 
0181   // To support gcc-4.4, which does not properly
0182   // support templated friend classes
0183   Arena* arena() const { return arena_; }
0184 
0185  private:
0186   using DestructorSkippable_ = void;
0187   Arena* arena_;
0188 };
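
// A minimal usage sketch for MapAllocator (hypothetical values). With a null
// arena it behaves like ::operator new / SizedDelete; with an arena, the
// memory is owned by the arena and deallocate() intentionally does nothing:
//
//   MapAllocator<int> heap_alloc;            // arena() == nullptr
//   int* p = heap_alloc.allocate(4);
//   heap_alloc.deallocate(p, 4);             // sized delete
//
//   Arena arena;
//   MapAllocator<int> arena_alloc(&arena);
//   int* q = arena_alloc.allocate(4);        // arena-owned; freed with the arena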
0189 
0190 // To save on binary size and simplify generic uses of the map types, we collapse
0191 // signed/unsigned versions of the same-sized integer to the unsigned version.
0192 template <typename T, typename = void>
0193 struct KeyForBaseImpl {
0194   using type = T;
0195 };
0196 template <typename T>
0197 struct KeyForBaseImpl<T, std::enable_if_t<std::is_integral<T>::value &&
0198                                           std::is_signed<T>::value>> {
0199   using type = std::make_unsigned_t<T>;
0200 };
0201 template <typename T>
0202 using KeyForBase = typename KeyForBaseImpl<T>::type;
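
// A couple of illustrative consequences of KeyForBase:
//
//   static_assert(std::is_same<KeyForBase<int32_t>, uint32_t>::value, "");
//   static_assert(std::is_same<KeyForBase<uint64_t>, uint64_t>::value, "");
//   static_assert(std::is_same<KeyForBase<std::string>, std::string>::value, "");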
0203 
0204 // Default case: Not transparent.
0205 // We use std::hash<key_type>/std::less<key_type> and all the lookup functions
0206 // only accept `key_type`.
0207 template <typename key_type>
0208 struct TransparentSupport {
0209   // We hash all the scalars as uint64_t so that we can implement the same hash
0210   // function for VariantKey. This way we can have MapKey provide the same hash
0211   // as the underlying value would have.
0212   using hash = std::hash<
0213       std::conditional_t<std::is_scalar<key_type>::value, uint64_t, key_type>>;
0214 
0215   static bool Equals(const key_type& a, const key_type& b) { return a == b; }
0216 
0217   template <typename K>
0218   using key_arg = key_type;
0219 
0220   using ViewType = std::conditional_t<std::is_scalar<key_type>::value, key_type,
0221                                       const key_type&>;
0222   static ViewType ToView(const key_type& v) { return v; }
0223 };
0224 
0225 // We add transparent support for std::string keys. We use
0226 // std::hash<absl::string_view> as it supports the input types we care about.
0227 // The lookup functions accept arbitrary `K`. This will include any key type
0228 // that is convertible to absl::string_view.
0229 template <>
0230 struct TransparentSupport<std::string> {
0231   // Use go/ranked-overloads for dispatching.
0232   struct Rank0 {};
0233   struct Rank1 : Rank0 {};
0234   struct Rank2 : Rank1 {};
0235   template <typename T, typename = std::enable_if_t<
0236                             std::is_convertible<T, absl::string_view>::value>>
0237   static absl::string_view ImplicitConvertImpl(T&& str, Rank2) {
0238     absl::string_view ref = str;
0239     return ref;
0240   }
0241   template <typename T, typename = std::enable_if_t<
0242                             std::is_convertible<T, const std::string&>::value>>
0243   static absl::string_view ImplicitConvertImpl(T&& str, Rank1) {
0244     const std::string& ref = str;
0245     return ref;
0246   }
0247   template <typename T>
0248   static absl::string_view ImplicitConvertImpl(T&& str, Rank0) {
0249     return {str.data(), str.size()};
0250   }
0251 
0252   template <typename T>
0253   static absl::string_view ImplicitConvert(T&& str) {
0254     return ImplicitConvertImpl(std::forward<T>(str), Rank2{});
0255   }
0256 
0257   struct hash : public absl::Hash<absl::string_view> {
0258     using is_transparent = void;
0259 
0260     template <typename T>
0261     size_t operator()(T&& str) const {
0262       return absl::Hash<absl::string_view>::operator()(
0263           ImplicitConvert(std::forward<T>(str)));
0264     }
0265   };
0266 
0267   template <typename T, typename U>
0268   static bool Equals(T&& t, U&& u) {
0269     return ImplicitConvert(std::forward<T>(t)) ==
0270            ImplicitConvert(std::forward<U>(u));
0271   }
0272 
0273   template <typename K>
0274   using key_arg = K;
0275 
0276   using ViewType = absl::string_view;
0277   template <typename T>
0278   static ViewType ToView(const T& v) {
0279     return ImplicitConvert(v);
0280   }
0281 };
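
// An illustrative sketch of the transparent lookup above: any type convertible
// to absl::string_view hashes and compares consistently, so lookups need not
// materialize a std::string.
//
//   using TS = TransparentSupport<std::string>;
//   TS::hash h;
//   assert(h(absl::string_view("abc")) == h(std::string("abc")));
//   assert(TS::Equals("abc", std::string("abc")));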
0282 
0283 enum class MapNodeSizeInfoT : uint32_t;
0284 inline uint16_t SizeFromInfo(MapNodeSizeInfoT node_size_info) {
0285   return static_cast<uint16_t>(static_cast<uint32_t>(node_size_info) >> 16);
0286 }
0287 inline uint16_t ValueOffsetFromInfo(MapNodeSizeInfoT node_size_info) {
0288   return static_cast<uint16_t>(static_cast<uint32_t>(node_size_info) >> 0);
0289 }
0290 constexpr MapNodeSizeInfoT MakeNodeInfo(uint16_t size, uint16_t value_offset) {
0291   return static_cast<MapNodeSizeInfoT>((static_cast<uint32_t>(size) << 16) |
0292                                        value_offset);
0293 }
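
// A worked sketch of the packing above (hypothetical values): the node size
// lives in the high 16 bits, the value offset in the low 16 bits.
//
//   MapNodeSizeInfoT info = MakeNodeInfo(/*size=*/48, /*value_offset=*/16);
//   SizeFromInfo(info);         // == 48
//   ValueOffsetFromInfo(info);  // == 16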
0294 
0295 struct NodeBase {
0296   // Align the node to allow KeyNode to predict the location of the key.
0297   // This way sizeof(NodeBase) already accounts for any padding that would
0298   // otherwise appear between NodeBase and the key.
0299   alignas(kMaxMessageAlignment) NodeBase* next;
0300 
0301   void* GetVoidKey() { return this + 1; }
0302   const void* GetVoidKey() const { return this + 1; }
0303 
0304   void* GetVoidValue(MapNodeSizeInfoT size_info) {
0305     return reinterpret_cast<char*>(this) + ValueOffsetFromInfo(size_info);
0306   }
0307 };
0308 
0309 inline NodeBase* EraseFromLinkedList(NodeBase* item, NodeBase* head) {
0310   if (head == item) {
0311     return head->next;
0312   } else {
0313     head->next = EraseFromLinkedList(item, head->next);
0314     return head;
0315   }
0316 }
0317 
0318 constexpr size_t MapTreeLengthThreshold() { return 8; }
0319 inline bool TableEntryIsTooLong(NodeBase* node) {
0320   const size_t kMaxLength = MapTreeLengthThreshold();
0321   size_t count = 0;
0322   do {
0323     ++count;
0324     node = node->next;
0325   } while (node != nullptr);
0326   // Invariant: no linked list is ever longer than kMaxLength.
0327   ABSL_DCHECK_LE(count, kMaxLength);
0328   return count >= kMaxLength;
0329 }
0330 
0331 // Similar to the public MapKey, but specialized for the internal
0332 // implementation.
0333 struct VariantKey {
0334   // We make this value 16 bytes to make it cheaper to pass in the ABI.
0335   // Can't overload string_view this way, so we unpack the fields.
0336   // data==nullptr means this is a number and `integral` is the value.
0337   // data!=nullptr means this is a string and `integral` is the size.
0338   const char* data;
0339   uint64_t integral;
0340 
0341   explicit VariantKey(uint64_t v) : data(nullptr), integral(v) {}
0342   explicit VariantKey(absl::string_view v)
0343       : data(v.data()), integral(v.size()) {
0344     // We use `data` to discriminate between the types, so make sure it is never
0345     // null here.
0346     if (data == nullptr) data = "";
0347   }
0348 
0349   size_t Hash() const {
0350     return data == nullptr ? std::hash<uint64_t>{}(integral)
0351                            : absl::Hash<absl::string_view>{}(
0352                                  absl::string_view(data, integral));
0353   }
0354 
0355   friend bool operator<(const VariantKey& left, const VariantKey& right) {
0356     ABSL_DCHECK_EQ(left.data == nullptr, right.data == nullptr);
0357     if (left.integral != right.integral) {
0358       // If they are numbers with different values, or strings with different
0359       // sizes, checking the integral field alone suffices.
0360       return left.integral < right.integral;
0361     }
0362     if (left.data == nullptr) {
0363       // They are numbers with the same value, so neither is less.
0364       return false;
0365     }
0366     // They are strings of the same size, so check the bytes.
0367     return memcmp(left.data, right.data, left.integral) < 0;
0368   }
0369 };
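
// An illustrative sketch of the ordering above: numbers compare by value;
// strings compare by size first, then bytes. Comparing a number against a
// string is not allowed (see the DCHECK).
//
//   VariantKey(uint64_t{2}) < VariantKey(uint64_t{10});   // true
//   VariantKey(absl::string_view("z")) <
//       VariantKey(absl::string_view("aa"));              // true: "z" is shorter
//   VariantKey(absl::string_view("ab")) <
//       VariantKey(absl::string_view("ac"));              // true: same size, memcmp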
0370 
0371 // This is to be specialized by MapKey.
0372 template <typename T>
0373 struct RealKeyToVariantKey {
0374   VariantKey operator()(T value) const { return VariantKey(value); }
0375 };
0376 
0377 template <>
0378 struct RealKeyToVariantKey<std::string> {
0379   template <typename T>
0380   VariantKey operator()(const T& value) const {
0381     return VariantKey(TransparentSupport<std::string>::ImplicitConvert(value));
0382   }
0383 };
0384 
0385 // We use a single kind of tree for all maps. This reduces code duplication.
0386 using TreeForMap =
0387     absl::btree_map<VariantKey, NodeBase*, std::less<VariantKey>,
0388                     MapAllocator<std::pair<const VariantKey, NodeBase*>>>;
0389 
0390 // Type safe tagged pointer.
0391 // We convert to/from nodes and trees using the operations below.
0392 // They ensure that the tags are used correctly.
0393 // There are three states:
0394 //  - x == 0: the entry is empty
0395 //  - x != 0 && (x&1) == 0: the entry is a node list
0396 //  - x != 0 && (x&1) == 1: the entry is a tree
0397 enum class TableEntryPtr : uintptr_t;
0398 
0399 inline bool TableEntryIsEmpty(TableEntryPtr entry) {
0400   return entry == TableEntryPtr{};
0401 }
0402 inline bool TableEntryIsTree(TableEntryPtr entry) {
0403   return (static_cast<uintptr_t>(entry) & 1) == 1;
0404 }
0405 inline bool TableEntryIsList(TableEntryPtr entry) {
0406   return !TableEntryIsTree(entry);
0407 }
0408 inline bool TableEntryIsNonEmptyList(TableEntryPtr entry) {
0409   return !TableEntryIsEmpty(entry) && TableEntryIsList(entry);
0410 }
0411 inline NodeBase* TableEntryToNode(TableEntryPtr entry) {
0412   ABSL_DCHECK(TableEntryIsList(entry));
0413   return reinterpret_cast<NodeBase*>(static_cast<uintptr_t>(entry));
0414 }
0415 inline TableEntryPtr NodeToTableEntry(NodeBase* node) {
0416   ABSL_DCHECK((reinterpret_cast<uintptr_t>(node) & 1) == 0);
0417   return static_cast<TableEntryPtr>(reinterpret_cast<uintptr_t>(node));
0418 }
0419 inline TreeForMap* TableEntryToTree(TableEntryPtr entry) {
0420   ABSL_DCHECK(TableEntryIsTree(entry));
0421   return reinterpret_cast<TreeForMap*>(static_cast<uintptr_t>(entry) - 1);
0422 }
0423 inline TableEntryPtr TreeToTableEntry(TreeForMap* node) {
0424   ABSL_DCHECK((reinterpret_cast<uintptr_t>(node) & 1) == 0);
0425   return static_cast<TableEntryPtr>(reinterpret_cast<uintptr_t>(node) | 1);
0426 }
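
// An illustrative round trip through the tagged pointer (assumes `node` and
// `tree` are valid pointers; both are at least 2-byte aligned, so bit 0 is
// free for the tag):
//
//   TableEntryPtr e = NodeToTableEntry(node);  // bit 0 == 0: a list
//   TableEntryIsList(e);                       // true
//   TableEntryToNode(e) == node;               // true
//
//   e = TreeToTableEntry(tree);                // bit 0 == 1: a tree
//   TableEntryIsTree(e);                       // true
//   TableEntryToTree(e) == tree;               // true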
0427 
0428 // The `bool` overload captures all numeric types; they use no extra space.
0429 inline size_t MapValueSpaceUsedExcludingSelfLong(bool) { return 0; }
0430 inline size_t MapValueSpaceUsedExcludingSelfLong(const std::string& str) {
0431   return StringSpaceUsedExcludingSelfLong(str);
0432 }
0433 template <typename T,
0434           typename = decltype(std::declval<const T&>().SpaceUsedLong())>
0435 size_t MapValueSpaceUsedExcludingSelfLong(const T& message) {
0436   return message.SpaceUsedLong() - sizeof(T);
0437 }
0438 
0439 constexpr size_t kGlobalEmptyTableSize = 1;
0440 PROTOBUF_EXPORT extern const TableEntryPtr
0441     kGlobalEmptyTable[kGlobalEmptyTableSize];
0442 
0443 template <typename Map,
0444           typename = typename std::enable_if<
0445               !std::is_scalar<typename Map::key_type>::value ||
0446               !std::is_scalar<typename Map::mapped_type>::value>::type>
0447 size_t SpaceUsedInValues(const Map* map) {
0448   size_t size = 0;
0449   for (const auto& v : *map) {
0450     size += internal::MapValueSpaceUsedExcludingSelfLong(v.first) +
0451             internal::MapValueSpaceUsedExcludingSelfLong(v.second);
0452   }
0453   return size;
0454 }
0455 
0456 inline size_t SpaceUsedInValues(const void*) { return 0; }
0457 
0458 class UntypedMapBase;
0459 
0460 class UntypedMapIterator {
0461  public:
0462   // Invariants:
0463   // node_ is always correct. This is handy because the most common
0464   // operations are operator* and operator-> and they only use node_.
0465   // When node_ is set to a non-null value, all the other non-const fields
0466   // are updated to be correct also, but those fields can become stale
0467   // if the underlying map is modified.  When those fields are needed they
0468   // are rechecked, and updated if necessary.
0469   UntypedMapIterator() : node_(nullptr), m_(nullptr), bucket_index_(0) {}
0470 
0471   explicit UntypedMapIterator(const UntypedMapBase* m);
0472 
0473   UntypedMapIterator(NodeBase* n, const UntypedMapBase* m, map_index_t index)
0474       : node_(n), m_(m), bucket_index_(index) {}
0475 
0476   // Advance through buckets, looking for the first that isn't empty.
0477   // If nothing non-empty is found then leave node_ == nullptr.
0478   void SearchFrom(map_index_t start_bucket);
0479 
0480   // The definition of operator== is handled by the derived type. If we were
0481   // to do it in this class it would allow comparing iterators of different
0482   // map types.
0483   bool Equals(const UntypedMapIterator& other) const {
0484     return node_ == other.node_;
0485   }
0486 
0487   // The definition of operator++ is handled in the derived type. We would not
0488   // be able to return the right type from here.
0489   void PlusPlus() {
0490     if (node_->next == nullptr) {
0491       SearchFrom(bucket_index_ + 1);
0492     } else {
0493       node_ = node_->next;
0494     }
0495   }
0496 
0497   // Conversion to and from a typed iterator child class is used by FFI.
0498   template <class Iter>
0499   static UntypedMapIterator FromTyped(Iter it) {
0500     static_assert(
0501 #if defined(__cpp_lib_is_layout_compatible) && \
0502     __cpp_lib_is_layout_compatible >= 201907L
0503         std::is_layout_compatible_v<Iter, UntypedMapIterator>,
0504 #else
0505         sizeof(it) == sizeof(UntypedMapIterator),
0506 #endif
0507         "Map iterator must not have extra state that the base class"
0508         "does not define.");
0509     return static_cast<UntypedMapIterator>(it);
0510   }
0511 
0512   template <class Iter>
0513   Iter ToTyped() const {
0514     return Iter(*this);
0515   }
0516   NodeBase* node_;
0517   const UntypedMapBase* m_;
0518   map_index_t bucket_index_;
0519 };
0520 
0521 // These properties are depended upon by Rust FFI.
0522 static_assert(std::is_trivially_copyable<UntypedMapIterator>::value,
0523               "UntypedMapIterator must be trivially copyable.");
0524 static_assert(std::is_trivially_destructible<UntypedMapIterator>::value,
0525               "UntypedMapIterator must be trivially destructible.");
0526 static_assert(std::is_standard_layout<UntypedMapIterator>::value,
0527               "UntypedMapIterator must be standard layout.");
0528 static_assert(offsetof(UntypedMapIterator, node_) == 0,
0529               "node_ must be the first field of UntypedMapIterator.");
0530 static_assert(sizeof(UntypedMapIterator) ==
0531                   sizeof(void*) * 2 +
0532                       std::max(sizeof(uint32_t), alignof(void*)),
0533               "UntypedMapIterator does not have the expected size for FFI");
0534 static_assert(
0535     alignof(UntypedMapIterator) == std::max(alignof(void*), alignof(uint32_t)),
0536     "UntypedMapIterator does not have the expected alignment for FFI");
0537 
0538 // Base class for all Map instantiations.
0539 // This class holds all the data and provides the basic functionality shared
0540 // among all instantiations.
0541 // Having an untyped base class helps generic consumers (like the table-driven
0542 // parser) by having non-template code that can handle all instantiations.
0543 class PROTOBUF_EXPORT UntypedMapBase {
0544   using Allocator = internal::MapAllocator<void*>;
0545   using Tree = internal::TreeForMap;
0546 
0547  public:
0548   using size_type = size_t;
0549 
0550   explicit constexpr UntypedMapBase(Arena* arena)
0551       : num_elements_(0),
0552         num_buckets_(internal::kGlobalEmptyTableSize),
0553         seed_(0),
0554         index_of_first_non_null_(internal::kGlobalEmptyTableSize),
0555         table_(const_cast<TableEntryPtr*>(internal::kGlobalEmptyTable)),
0556         alloc_(arena) {}
0557 
0558   UntypedMapBase(const UntypedMapBase&) = delete;
0559   UntypedMapBase& operator=(const UntypedMapBase&) = delete;
0560 
0561  protected:
0562   // 16 bytes is the minimum useful size for the array cache in the arena.
0563   enum : map_index_t { kMinTableSize = 16 / sizeof(void*) };
0564 
0565  public:
0566   Arena* arena() const { return this->alloc_.arena(); }
0567 
0568   void InternalSwap(UntypedMapBase* other) {
0569     std::swap(num_elements_, other->num_elements_);
0570     std::swap(num_buckets_, other->num_buckets_);
0571     std::swap(seed_, other->seed_);
0572     std::swap(index_of_first_non_null_, other->index_of_first_non_null_);
0573     std::swap(table_, other->table_);
0574     std::swap(alloc_, other->alloc_);
0575   }
0576 
0577   static size_type max_size() {
0578     return std::numeric_limits<map_index_t>::max();
0579   }
0580   size_type size() const { return num_elements_; }
0581   bool empty() const { return size() == 0; }
0582 
0583   UntypedMapIterator begin() const { return UntypedMapIterator(this); }
0584   // We make this a static function to reduce the cost in MapField.
0585   // All the end iterators are singletons anyway.
0586   static UntypedMapIterator EndIterator() { return {}; }
0587 
0588  protected:
0589   friend class TcParser;
0590   friend struct MapTestPeer;
0591   friend struct MapBenchmarkPeer;
0592   friend class UntypedMapIterator;
0593 
0594   struct NodeAndBucket {
0595     NodeBase* node;
0596     map_index_t bucket;
0597   };
0598 
0599   // Returns whether we should insert after the head of the list. For
0600   // non-optimized builds, we randomly decide whether to insert right at the
0601   // head of the list or just after the head. This helps add a little bit of
0602   // non-determinism to the map ordering.
0603   bool ShouldInsertAfterHead(void* node) {
0604 #ifdef NDEBUG
0605     (void)node;
0606     return false;
0607 #else
0608     // Doing modulo with a prime mixes the bits more.
0609     return (reinterpret_cast<uintptr_t>(node) ^ seed_) % 13 > 6;
0610 #endif
0611   }
0612 
0613   // Helper for InsertUnique.  Handles the case where bucket b is a
0614   // not-too-long linked list.
0615   void InsertUniqueInList(map_index_t b, NodeBase* node) {
0616     if (!TableEntryIsEmpty(b) && ShouldInsertAfterHead(node)) {
0617       auto* first = TableEntryToNode(table_[b]);
0618       node->next = first->next;
0619       first->next = node;
0620     } else {
0621       node->next = TableEntryToNode(table_[b]);
0622       table_[b] = NodeToTableEntry(node);
0623     }
0624   }
0625 
0626   bool TableEntryIsEmpty(map_index_t b) const {
0627     return internal::TableEntryIsEmpty(table_[b]);
0628   }
0629   bool TableEntryIsNonEmptyList(map_index_t b) const {
0630     return internal::TableEntryIsNonEmptyList(table_[b]);
0631   }
0632   bool TableEntryIsTree(map_index_t b) const {
0633     return internal::TableEntryIsTree(table_[b]);
0634   }
0635   bool TableEntryIsList(map_index_t b) const {
0636     return internal::TableEntryIsList(table_[b]);
0637   }
0638 
0639   // Return whether table_[b] is a linked list that seems awfully long.
0640   // Requires table_[b] to point to a non-empty linked list.
0641   bool TableEntryIsTooLong(map_index_t b) {
0642     return internal::TableEntryIsTooLong(TableEntryToNode(table_[b]));
0643   }
0644 
0645   // Return a power of two no less than max(kMinTableSize, n).
0646   // Assumes either n < kMinTableSize or n is a power of two.
0647   map_index_t TableSize(map_index_t n) {
0648     return n < kMinTableSize ? kMinTableSize : n;
0649   }
0650 
0651   template <typename T>
0652   using AllocFor = absl::allocator_traits<Allocator>::template rebind_alloc<T>;
0653 
0654   // Alignment of the nodes is the same as alignment of NodeBase.
0655   NodeBase* AllocNode(MapNodeSizeInfoT size_info) {
0656     return AllocNode(SizeFromInfo(size_info));
0657   }
0658 
0659   NodeBase* AllocNode(size_t node_size) {
0660     PROTOBUF_ASSUME(node_size % sizeof(NodeBase) == 0);
0661     return AllocFor<NodeBase>(alloc_).allocate(node_size / sizeof(NodeBase));
0662   }
0663 
0664   void DeallocNode(NodeBase* node, MapNodeSizeInfoT size_info) {
0665     DeallocNode(node, SizeFromInfo(size_info));
0666   }
0667 
0668   void DeallocNode(NodeBase* node, size_t node_size) {
0669     PROTOBUF_ASSUME(node_size % sizeof(NodeBase) == 0);
0670     AllocFor<NodeBase>(alloc_).deallocate(node, node_size / sizeof(NodeBase));
0671   }
0672 
0673   void DeleteTable(TableEntryPtr* table, map_index_t n) {
0674     if (auto* a = arena()) {
0675       a->ReturnArrayMemory(table, n * sizeof(TableEntryPtr));
0676     } else {
0677       internal::SizedDelete(table, n * sizeof(TableEntryPtr));
0678     }
0679   }
0680 
0681   NodeBase* DestroyTree(Tree* tree);
0682   using GetKey = VariantKey (*)(NodeBase*);
0683   void InsertUniqueInTree(map_index_t b, GetKey get_key, NodeBase* node);
0684   void TransferTree(Tree* tree, GetKey get_key);
0685   TableEntryPtr ConvertToTree(NodeBase* node, GetKey get_key);
0686   void EraseFromTree(map_index_t b, typename Tree::iterator tree_it);
0687 
0688   map_index_t VariantBucketNumber(VariantKey key) const;
0689 
0690   map_index_t BucketNumberFromHash(uint64_t h) const {
0691     // We xor the hash value against the random seed so that we effectively
0692     // have a random hash function.
0693     // We use absl::Hash to do bit mixing for uniform bucket selection.
0694     return absl::HashOf(h ^ seed_) & (num_buckets_ - 1);
0695   }
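
  // A worked sketch (hypothetical values): with num_buckets_ == 16 the mask
  // num_buckets_ - 1 == 0xF keeps only the low 4 bits of the mixed hash. This
  // masking is why num_buckets_ must always be a power of two.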
0696 
0697   TableEntryPtr* CreateEmptyTable(map_index_t n) {
0698     ABSL_DCHECK_GE(n, kMinTableSize);
0699     ABSL_DCHECK_EQ(n & (n - 1), 0u);
0700     TableEntryPtr* result = AllocFor<TableEntryPtr>(alloc_).allocate(n);
0701     memset(result, 0, n * sizeof(result[0]));
0702     return result;
0703   }
0704 
0705   // Return a randomish value.
0706   map_index_t Seed() const {
0707     uint64_t s = 0;
0708 #if !defined(GOOGLE_PROTOBUF_NO_RDTSC)
0709 #if defined(__APPLE__)
0710     // Use a commpage-based fast time function on Apple environments (MacOS,
0711     // iOS, tvOS, watchOS, etc).
0712     s = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
0713 #elif defined(__x86_64__) && defined(__GNUC__)
0714     uint32_t hi, lo;
0715     asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
0716     s = ((static_cast<uint64_t>(hi) << 32) | lo);
0717 #elif defined(__aarch64__) && defined(__GNUC__)
0718     // There is no rdtsc on ARMv8. CNTVCT_EL0 is the virtual counter of the
0719     // system timer. It runs at a different frequency than the CPU's, but is
0720     // the best source of time-based entropy we get.
0721     uint64_t virtual_timer_value;
0722     asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
0723     s = virtual_timer_value;
0724 #endif
0725 #endif  // !defined(GOOGLE_PROTOBUF_NO_RDTSC)
0726     // Add entropy from the address of the map and the address of the table
0727     // array.
0728     return static_cast<map_index_t>(
0729         absl::HashOf(s, table_, static_cast<const void*>(this)));
0730   }
0731 
0732   enum {
0733     kKeyIsString = 1 << 0,
0734     kValueIsString = 1 << 1,
0735     kValueIsProto = 1 << 2,
0736     kUseDestructFunc = 1 << 3,
0737   };
0738   template <typename Key, typename Value>
0739   static constexpr uint8_t MakeDestroyBits() {
0740     uint8_t result = 0;
0741     if (!std::is_trivially_destructible<Key>::value) {
0742       if (std::is_same<Key, std::string>::value) {
0743         result |= kKeyIsString;
0744       } else {
0745         return kUseDestructFunc;
0746       }
0747     }
0748     if (!std::is_trivially_destructible<Value>::value) {
0749       if (std::is_same<Value, std::string>::value) {
0750         result |= kValueIsString;
0751       } else if (std::is_base_of<MessageLite, Value>::value) {
0752         result |= kValueIsProto;
0753       } else {
0754         return kUseDestructFunc;
0755       }
0756     }
0757     return result;
0758   }
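
  // An illustrative evaluation of MakeDestroyBits (SomeMessage and
  // SomeNonProtoType are hypothetical):
  //
  //   MakeDestroyBits<int32_t, int64_t>();          // == 0: nothing to destroy
  //   MakeDestroyBits<std::string, std::string>();  // == kKeyIsString |
  //                                                 //    kValueIsString
  //   MakeDestroyBits<int32_t, SomeMessage>();      // == kValueIsProto
  //   MakeDestroyBits<int32_t, SomeNonProtoType>(); // == kUseDestructFunc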
0759 
0760   struct ClearInput {
0761     MapNodeSizeInfoT size_info;
0762     uint8_t destroy_bits;
0763     bool reset_table;
0764     void (*destroy_node)(NodeBase*);
0765   };
0766 
0767   template <typename Node>
0768   static void DestroyNode(NodeBase* node) {
0769     static_cast<Node*>(node)->~Node();
0770   }
0771 
0772   template <typename Node>
0773   static constexpr ClearInput MakeClearInput(bool reset) {
0774     constexpr auto bits =
0775         MakeDestroyBits<typename Node::key_type, typename Node::mapped_type>();
0776     return ClearInput{Node::size_info(), bits, reset,
0777                       bits & kUseDestructFunc ? DestroyNode<Node> : nullptr};
0778   }
0779 
0780   void ClearTable(ClearInput input);
0781 
0782   NodeAndBucket FindFromTree(map_index_t b, VariantKey key,
0783                              Tree::iterator* it) const;
0784 
0785   // Space used for the table, trees, and nodes.
0786   // Does not include indirectly owned space, e.g. the data of a std::string.
0787   size_t SpaceUsedInTable(size_t sizeof_node) const;
0788 
0789   map_index_t num_elements_;
0790   map_index_t num_buckets_;
0791   map_index_t seed_;
0792   map_index_t index_of_first_non_null_;
0793   TableEntryPtr* table_;  // an array with num_buckets_ entries
0794   Allocator alloc_;
0795 };
0796 
0797 inline UntypedMapIterator::UntypedMapIterator(const UntypedMapBase* m) : m_(m) {
0798   if (m_->index_of_first_non_null_ == m_->num_buckets_) {
0799     bucket_index_ = 0;
0800     node_ = nullptr;
0801   } else {
0802     bucket_index_ = m_->index_of_first_non_null_;
0803     TableEntryPtr entry = m_->table_[bucket_index_];
0804     node_ = PROTOBUF_PREDICT_TRUE(TableEntryIsList(entry))
0805                 ? TableEntryToNode(entry)
0806                 : TableEntryToTree(entry)->begin()->second;
0807     PROTOBUF_ASSUME(node_ != nullptr);
0808   }
0809 }
0810 
0811 inline void UntypedMapIterator::SearchFrom(map_index_t start_bucket) {
0812   ABSL_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
0813               !m_->TableEntryIsEmpty(m_->index_of_first_non_null_));
0814   for (map_index_t i = start_bucket; i < m_->num_buckets_; ++i) {
0815     TableEntryPtr entry = m_->table_[i];
0816     if (entry == TableEntryPtr{}) continue;
0817     bucket_index_ = i;
0818     if (PROTOBUF_PREDICT_TRUE(TableEntryIsList(entry))) {
0819       node_ = TableEntryToNode(entry);
0820     } else {
0821       TreeForMap* tree = TableEntryToTree(entry);
0822       ABSL_DCHECK(!tree->empty());
0823       node_ = tree->begin()->second;
0824     }
0825     return;
0826   }
0827   node_ = nullptr;
0828   bucket_index_ = 0;
0829 }
0830 
0831 // Base class used by TcParser to extract the map object from a map field.
0832 // We keep it here to avoid a dependency on map_field.h from the main TcParser
0833 // code, since that would bring in Message too.
0834 class MapFieldBaseForParse {
0835  public:
0836   const UntypedMapBase& GetMap() const {
0837     return vtable_->get_map(*this, false);
0838   }
0839   UntypedMapBase* MutableMap() {
0840     return &const_cast<UntypedMapBase&>(vtable_->get_map(*this, true));
0841   }
0842 
0843  protected:
0844   struct VTable {
0845     const UntypedMapBase& (*get_map)(const MapFieldBaseForParse&,
0846                                      bool is_mutable);
0847   };
0848   explicit constexpr MapFieldBaseForParse(const VTable* vtable)
0849       : vtable_(vtable) {}
0850   ~MapFieldBaseForParse() = default;
0851 
0852   const VTable* vtable_;
0853 };
0854 
0855 // The value might be of different signedness, so use memcpy to extract it.
0856 template <typename T, std::enable_if_t<std::is_integral<T>::value, int> = 0>
0857 T ReadKey(const void* ptr) {
0858   T out;
0859   memcpy(&out, ptr, sizeof(T));
0860   return out;
0861 }
0862 
0863 template <typename T, std::enable_if_t<!std::is_integral<T>::value, int> = 0>
0864 const T& ReadKey(const void* ptr) {
0865   return *reinterpret_cast<const T*>(ptr);
0866 }
0867 
0868 template <typename Key>
0869 struct KeyNode : NodeBase {
0870   static constexpr size_t kOffset = sizeof(NodeBase);
0871   decltype(auto) key() const { return ReadKey<Key>(GetVoidKey()); }
0872 };
0873 
0874 // KeyMapBase is a chaining hash map with the additional feature that some
0875 // buckets can be converted to use an ordered container.  This ensures O(lg n)
0876 // bounds on find, insert, and erase, while avoiding the overheads of ordered
0877 // containers most of the time.
0878 //
0879 // The implementation doesn't need the full generality of unordered_map,
0880 // and it doesn't have it.  More bells and whistles can be added as needed.
0881 // Some implementation details:
0882 // 1. The number of buckets is a power of two.
0883 // 2. As is typical for hash_map and such, the Keys and Values are always
0884 //    stored in linked list nodes.  Pointers to elements are never invalidated
0885 //    until the element is deleted.
0886 // 3. The trees' payload type is pointer to linked-list node.  Tree-converting
0887 //    a bucket doesn't copy Key-Value pairs.
0888 // 4. Once we've tree-converted a bucket, it is never converted back unless the
0889 //    bucket is completely emptied out. Note that the items a tree contains may
0890 //    wind up assigned to trees or lists upon a rehash.
0891 // 5. Mutations to a map do not invalidate the map's iterators, pointers to
0892 //    elements, or references to elements.
0893 // 6. Except for erase(iterator), any non-const method can reorder iterators.
0894 // 7. Uses VariantKey when using the Tree representation, which holds all
0895 //    possible key types as a variant value.
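//
// An illustrative consequence of (1)-(4): a bucket starts out as a linked
// list; once its chain reaches MapTreeLengthThreshold() (8 nodes),
// InsertUnique converts it to a TreeForMap keyed by VariantKey. The tree holds
// pointers to the same NodeBase nodes, so no Key-Value pair is copied, and the
// bucket only becomes a list again after it empties out or the table rehashes.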
0896 
0897 template <typename Key>
0898 class KeyMapBase : public UntypedMapBase {
0899   static_assert(!std::is_signed<Key>::value || !std::is_integral<Key>::value,
0900                 "");
0901 
0902   using TS = TransparentSupport<Key>;
0903 
0904  public:
0905   using hasher = typename TS::hash;
0906 
0907   using UntypedMapBase::UntypedMapBase;
0908 
0909  protected:
0910   using KeyNode = internal::KeyNode<Key>;
0911 
0912   // Trees. The tree is keyed by VariantKey, so that we can query it with
0913   // keys that are not stored in any particular data structure.
0914   // The value is a NodeBase* pointing to the node. Using the untyped
0915   // NodeBase instead of the typed Node avoids code bloat: there is only one
0916   // instantiation of the tree class for all key types.
0917   using Tree = internal::TreeForMap;
0918   using TreeIterator = typename Tree::iterator;
0919 
0920  public:
0921   hasher hash_function() const { return {}; }
0922 
0923  protected:
0924   friend class TcParser;
0925   friend struct MapTestPeer;
0926   friend struct MapBenchmarkPeer;
0927 
0928   PROTOBUF_NOINLINE void erase_no_destroy(map_index_t b, KeyNode* node) {
0929     TreeIterator tree_it;
0930     const bool is_list = revalidate_if_necessary(b, node, &tree_it);
0931     if (is_list) {
0932       ABSL_DCHECK(TableEntryIsNonEmptyList(b));
0933       auto* head = TableEntryToNode(table_[b]);
0934       head = EraseFromLinkedList(node, head);
0935       table_[b] = NodeToTableEntry(head);
0936     } else {
0937       EraseFromTree(b, tree_it);
0938     }
0939     --num_elements_;
0940     if (PROTOBUF_PREDICT_FALSE(b == index_of_first_non_null_)) {
0941       while (index_of_first_non_null_ < num_buckets_ &&
0942              TableEntryIsEmpty(index_of_first_non_null_)) {
0943         ++index_of_first_non_null_;
0944       }
0945     }
0946   }
0947 
0948   NodeAndBucket FindHelper(typename TS::ViewType k,
0949                            TreeIterator* it = nullptr) const {
0950     map_index_t b = BucketNumber(k);
0951     if (TableEntryIsNonEmptyList(b)) {
0952       auto* node = internal::TableEntryToNode(table_[b]);
0953       do {
0954         if (TS::Equals(static_cast<KeyNode*>(node)->key(), k)) {
0955           return {node, b};
0956         } else {
0957           node = node->next;
0958         }
0959       } while (node != nullptr);
0960     } else if (TableEntryIsTree(b)) {
0961       return FindFromTree(b, internal::RealKeyToVariantKey<Key>{}(k), it);
0962     }
0963     return {nullptr, b};
0964   }
0965 
0966   // Inserts the given node.
0967   // If the key is a duplicate, it inserts the new node and returns the old
0968   // one, giving ownership to the caller.
0969   // If the key is unique, it returns `nullptr`.
0970   KeyNode* InsertOrReplaceNode(KeyNode* node) {
0971     KeyNode* to_erase = nullptr;
0972     auto p = this->FindHelper(node->key());
0973     map_index_t b = p.bucket;
0974     if (p.node != nullptr) {
0975       erase_no_destroy(p.bucket, static_cast<KeyNode*>(p.node));
0976       to_erase = static_cast<KeyNode*>(p.node);
0977     } else if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
0978       b = BucketNumber(node->key());  // The table was resized; recompute.
0979     }
0980     InsertUnique(b, node);
0981     ++num_elements_;
0982     return to_erase;
0983   }
0984 
0985   // Insert the given Node in bucket b.  If that would make bucket b too big,
0986   // and bucket b is not a tree, create a tree for bucket b.
0987   // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct
0988   // bucket.  num_elements_ is not modified.
0989   void InsertUnique(map_index_t b, KeyNode* node) {
0990     ABSL_DCHECK(index_of_first_non_null_ == num_buckets_ ||
0991                 !TableEntryIsEmpty(index_of_first_non_null_));
0992     // In practice, the code that led to this point may have already
0993     // determined whether we are inserting into an empty list, a short list,
0994     // or whatever.  But it's probably cheap enough to recompute that here;
0995     // it's likely that we're inserting into an empty or short list.
0996     ABSL_DCHECK(FindHelper(node->key()).node == nullptr);
0997     if (TableEntryIsEmpty(b)) {
0998       InsertUniqueInList(b, node);
0999       index_of_first_non_null_ = (std::min)(index_of_first_non_null_, b);
1000     } else if (TableEntryIsNonEmptyList(b) && !TableEntryIsTooLong(b)) {
1001       InsertUniqueInList(b, node);
1002     } else {
1003       InsertUniqueInTree(b, NodeToVariantKey, node);
1004     }
1005   }
1006 
1007   static VariantKey NodeToVariantKey(NodeBase* node) {
1008     return internal::RealKeyToVariantKey<Key>{}(
1009         static_cast<KeyNode*>(node)->key());
1010   }
1011 
1012   // Kept as a separate function for testing.
1013   static size_type CalculateHiCutoff(size_type num_buckets) {
1014     // We want the high cutoff to follow these rules:
1015     //  - When num_buckets_ == kGlobalEmptyTableSize, make it 0 to force an
1016     //    allocation.
1017     //  - When num_buckets_ < 16, make it num_buckets_ to avoid a
1018     //    reallocation. A large load factor is not that important on small
1019     //    tables and saves memory.
1020     //  - Otherwise, make it 75% of num_buckets_.
1021     return num_buckets - num_buckets / 16 * 4 - num_buckets % 2;
1022   }
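
  // Worked values for the formula above (num_buckets is always a power of 2):
  //
  //   num_buckets ==  1:  1 -  0 - 1 ==  0   (force allocation)
  //   num_buckets ==  8:  8 -  0 - 0 ==  8   (full load on small tables)
  //   num_buckets == 16: 16 -  4 - 0 == 12   (75%)
  //   num_buckets == 64: 64 - 16 - 0 == 48   (75%)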
1023 
1024   // Returns whether it did resize.  Currently this is only used when
1025   // num_elements_ increases, though it could be used in other situations.
1026   // It checks for load too low as well as load too high: because any number
1027   // of erases can occur between inserts, the load could be as low as 0 here.
1028   // Resizing to a lower size is not always helpful, but failing to do so can
1029   // destroy the expected big-O bounds for some operations. By having the
1030   // policy that sometimes we resize down as well as up, clients can easily
1031   // keep O(size()) = O(number of buckets) if they want that.
1032   bool ResizeIfLoadIsOutOfRange(size_type new_size) {
1033     const size_type hi_cutoff = CalculateHiCutoff(num_buckets_);
1034     const size_type lo_cutoff = hi_cutoff / 4;
1035     // We don't care how many elements are in trees.  If a lot are,
1036     // we may resize even though there are many empty buckets.  In
1037     // practice, this seems fine.
1038     if (PROTOBUF_PREDICT_FALSE(new_size > hi_cutoff)) {
1039       if (num_buckets_ <= max_size() / 2) {
1040         Resize(num_buckets_ * 2);
1041         return true;
1042       }
1043     } else if (PROTOBUF_PREDICT_FALSE(new_size <= lo_cutoff &&
1044                                       num_buckets_ > kMinTableSize)) {
1045       size_type lg2_of_size_reduction_factor = 1;
1046       // It's possible we want to shrink a lot here... size() could even be 0.
1047       // So, estimate how much to shrink by making sure we don't shrink so
1048       // much that we would need to grow the table after a few inserts.
1049       const size_type hypothetical_size = new_size * 5 / 4 + 1;
1050       while ((hypothetical_size << lg2_of_size_reduction_factor) < hi_cutoff) {
1051         ++lg2_of_size_reduction_factor;
1052       }
1053       size_type new_num_buckets = std::max<size_type>(
1054           kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
1055       if (new_num_buckets != num_buckets_) {
1056         Resize(new_num_buckets);
1057         return true;
1058       }
1059     }
1060     return false;
1061   }
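
  // An illustrative trace of the policy above (hypothetical sizes): with
  // num_buckets_ == 64, hi_cutoff == 48 and lo_cutoff == 12. Growing past 48
  // elements doubles the table to 128 buckets. Shrinking to 12 or fewer halves
  // the table repeatedly (here to 16 buckets), stopping while a few more
  // inserts would not immediately need to grow it again, and never going below
  // kMinTableSize.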
1062 
1063   // Resize to the given number of buckets.
1064   void Resize(map_index_t new_num_buckets) {
1065     if (num_buckets_ == kGlobalEmptyTableSize) {
1066       // This is the global empty array.
1067       // Just overwrite with a new one. No need to transfer or free anything.
1068       num_buckets_ = index_of_first_non_null_ = kMinTableSize;
1069       table_ = CreateEmptyTable(num_buckets_);
1070       seed_ = Seed();
1071       return;
1072     }
1073 
1074     ABSL_DCHECK_GE(new_num_buckets, kMinTableSize);
1075     const auto old_table = table_;
1076     const map_index_t old_table_size = num_buckets_;
1077     num_buckets_ = new_num_buckets;
1078     table_ = CreateEmptyTable(num_buckets_);
1079     const map_index_t start = index_of_first_non_null_;
1080     index_of_first_non_null_ = num_buckets_;
1081     for (map_index_t i = start; i < old_table_size; ++i) {
1082       if (internal::TableEntryIsNonEmptyList(old_table[i])) {
1083         TransferList(static_cast<KeyNode*>(TableEntryToNode(old_table[i])));
1084       } else if (internal::TableEntryIsTree(old_table[i])) {
1085         this->TransferTree(TableEntryToTree(old_table[i]), NodeToVariantKey);
1086       }
1087     }
1088     DeleteTable(old_table, old_table_size);
1089   }
1090 
1091   // Transfer all nodes in the list `node` into `this`.
1092   void TransferList(KeyNode* node) {
1093     do {
1094       auto* next = static_cast<KeyNode*>(node->next);
1095       InsertUnique(BucketNumber(node->key()), node);
1096       node = next;
1097     } while (node != nullptr);
1098   }
1099 
1100   map_index_t BucketNumber(typename TS::ViewType k) const {
1101     ABSL_DCHECK_EQ(BucketNumberFromHash(hash_function()(k)),
1102                    VariantBucketNumber(RealKeyToVariantKey<Key>{}(k)));
1103     return BucketNumberFromHash(hash_function()(k));
1104   }
1105 
1106   // Assumes node_ and m_ are correct and non-null, but other fields may be
1107   // stale.  Fix them as needed.  Then return true iff node_ points to a
1108   // Node in a list.  If false is returned then *it is modified to be
1109   // a valid iterator for node_.
1110   bool revalidate_if_necessary(map_index_t& bucket_index, KeyNode* node,
1111                                TreeIterator* it) const {
1112     // Force bucket_index to be in range.
1113     bucket_index &= (num_buckets_ - 1);
1114     // Common case: the bucket we think is relevant points to `node`.
1115     if (table_[bucket_index] == NodeToTableEntry(node)) return true;
1116     // Less common: the bucket is a linked list with node_ somewhere in it,
1117     // but not at the head.
1118     if (TableEntryIsNonEmptyList(bucket_index)) {
1119       auto* l = TableEntryToNode(table_[bucket_index]);
1120       while ((l = l->next) != nullptr) {
1121         if (l == node) {
1122           return true;
1123         }
1124       }
1125     }
1126     // Well, bucket_index_ still might be correct, but probably
1127     // not.  Revalidate just to be sure.  This case is rare enough that we
1128     // don't worry about potential optimizations, such as having a custom
1129     // find-like method that compares Node* instead of the key.
1130     auto res = FindHelper(node->key(), it);
1131     bucket_index = res.bucket;
1132     return TableEntryIsList(bucket_index);
1133   }
1134 };
1135 
1136 template <typename T, typename K>
1137 bool InitializeMapKey(T*, K&&, Arena*) {
1138   return false;
1139 }
1140 
1141 
1142 }  // namespace internal
1143 
1144 // This is the type used for Map's internal value_type.
1145 template <typename Key, typename T>
1146 using MapPair = std::pair<const Key, T>;
1147 
1148 // Map is an associative container type used to store protobuf map
1149 // fields.  Each Map instance may or may not use a different hash function, a
1150 // different iteration order, and so on.  E.g., please don't examine
1151 // implementation details to decide if the following would work:
1152 //  Map<int, int> m0, m1;
1153 //  m0[0] = m1[0] = m0[1] = m1[1] = 0;
1154 //  assert(m0.begin()->first == m1.begin()->first);  // Bug!
1155 //
1156 // Map's interface is similar to std::unordered_map, except that Map is not
1157 // designed to play well with exceptions.
1158 template <typename Key, typename T>
1159 class Map : private internal::KeyMapBase<internal::KeyForBase<Key>> {
1160   using Base = typename Map::KeyMapBase;
1161 
1162   using TS = internal::TransparentSupport<Key>;
1163 
1164  public:
1165   using key_type = Key;
1166   using mapped_type = T;
1167   using init_type = std::pair<Key, T>;
1168   using value_type = MapPair<Key, T>;
1169 
1170   using pointer = value_type*;
1171   using const_pointer = const value_type*;
1172   using reference = value_type&;
1173   using const_reference = const value_type&;
1174 
1175   using size_type = size_t;
1176   using hasher = typename TS::hash;
1177 
1178   constexpr Map() : Base(nullptr) { StaticValidityCheck(); }
1179   Map(const Map& other) : Map(nullptr, other) {}
1180 
1181   // Internal Arena constructors: do not use!
1182   // TODO: remove non internal ctors
1183   explicit Map(Arena* arena) : Base(arena) { StaticValidityCheck(); }
1184   Map(internal::InternalVisibility, Arena* arena) : Map(arena) {}
1185   Map(internal::InternalVisibility, Arena* arena, const Map& other)
1186       : Map(arena, other) {}
1187 
1188   Map(Map&& other) noexcept : Map() {
1189     if (other.arena() != nullptr) {
1190       *this = other;
1191     } else {
1192       swap(other);
1193     }
1194   }
1195 
1196   Map& operator=(Map&& other) noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
1197     if (this != &other) {
1198       if (arena() != other.arena()) {
1199         *this = other;
1200       } else {
1201         swap(other);
1202       }
1203     }
1204     return *this;
1205   }
1206 
1207   template <class InputIt>
1208   Map(const InputIt& first, const InputIt& last) : Map() {
1209     insert(first, last);
1210   }
1211 
1212   ~Map() {
1213     // Fail-safe in case we miss calling this in a constructor.  Note: this one
1214     // won't trigger for leaked maps that never get destructed.
1215     StaticValidityCheck();
1216 
1217     if (this->num_buckets_ != internal::kGlobalEmptyTableSize) {
1218       this->ClearTable(this->template MakeClearInput<Node>(false));
1219     }
1220   }
1221 
1222  private:
1223   Map(Arena* arena, const Map& other) : Base(arena) {
1224     StaticValidityCheck();
1225     insert(other.begin(), other.end());
1226   }
1227   static_assert(!std::is_const<mapped_type>::value &&
1228                     !std::is_const<key_type>::value,
1229                 "We do not support const types.");
1230   static_assert(!std::is_volatile<mapped_type>::value &&
1231                     !std::is_volatile<key_type>::value,
1232                 "We do not support volatile types.");
1233   static_assert(!std::is_pointer<mapped_type>::value &&
1234                     !std::is_pointer<key_type>::value,
1235                 "We do not support pointer types.");
1236   static_assert(!std::is_reference<mapped_type>::value &&
1237                     !std::is_reference<key_type>::value,
1238                 "We do not support reference types.");
1239   static constexpr PROTOBUF_ALWAYS_INLINE void StaticValidityCheck() {
1240     static_assert(alignof(internal::NodeBase) >= alignof(mapped_type),
1241                   "Alignment of mapped type is too high.");
1242     static_assert(
1243         absl::disjunction<internal::is_supported_integral_type<key_type>,
1244                           internal::is_supported_string_type<key_type>,
1245                           internal::is_internal_map_key_type<key_type>>::value,
1246         "We only support integer, string, or designated internal key "
1247         "types.");
1248     static_assert(absl::disjunction<
1249                       internal::is_supported_scalar_type<mapped_type>,
1250                       is_proto_enum<mapped_type>,
1251                       internal::is_supported_message_type<mapped_type>,
1252                       internal::is_internal_map_value_type<mapped_type>>::value,
1253                   "We only support scalar, Message, and designated internal "
1254                   "mapped types.");
1255   }
1256 
1257   template <typename P>
1258   struct SameAsElementReference
1259       : std::is_same<typename std::remove_cv<
1260                          typename std::remove_reference<reference>::type>::type,
1261                      typename std::remove_cv<
1262                          typename std::remove_reference<P>::type>::type> {};
1263 
1264   template <class P>
1265   using RequiresInsertable =
1266       typename std::enable_if<std::is_convertible<P, init_type>::value ||
1267                                   SameAsElementReference<P>::value,
1268                               int>::type;
1269   template <class P>
1270   using RequiresNotInit =
1271       typename std::enable_if<!std::is_same<P, init_type>::value, int>::type;
1272 
1273   template <typename LookupKey>
1274   using key_arg = typename TS::template key_arg<LookupKey>;
1275 
1276  public:
1277   // Iterators
1278   class const_iterator : private internal::UntypedMapIterator {
1279     using BaseIt = internal::UntypedMapIterator;
1280 
1281    public:
1282     using iterator_category = std::forward_iterator_tag;
1283     using value_type = typename Map::value_type;
1284     using difference_type = ptrdiff_t;
1285     using pointer = const value_type*;
1286     using reference = const value_type&;
1287 
1288     const_iterator() {}
1289     const_iterator(const const_iterator&) = default;
1290     const_iterator& operator=(const const_iterator&) = default;
1291 
1292     reference operator*() const { return static_cast<Node*>(this->node_)->kv; }
1293     pointer operator->() const { return &(operator*()); }
1294 
1295     const_iterator& operator++() {
1296       this->PlusPlus();
1297       return *this;
1298     }
1299     const_iterator operator++(int) {
1300       auto copy = *this;
1301       this->PlusPlus();
1302       return copy;
1303     }
1304 
1305     friend bool operator==(const const_iterator& a, const const_iterator& b) {
1306       return a.Equals(b);
1307     }
1308     friend bool operator!=(const const_iterator& a, const const_iterator& b) {
1309       return !a.Equals(b);
1310     }
1311 
1312    private:
1313     using BaseIt::BaseIt;
1314     explicit const_iterator(const BaseIt& base) : BaseIt(base) {}
1315     friend class Map;
1316     friend class internal::UntypedMapIterator;
1317     friend class internal::TypeDefinedMapFieldBase<Key, T>;
1318   };
1319 
1320   class iterator : private internal::UntypedMapIterator {
1321     using BaseIt = internal::UntypedMapIterator;
1322 
1323    public:
1324     using iterator_category = std::forward_iterator_tag;
1325     using value_type = typename Map::value_type;
1326     using difference_type = ptrdiff_t;
1327     using pointer = value_type*;
1328     using reference = value_type&;
1329 
1330     iterator() {}
1331     iterator(const iterator&) = default;
1332     iterator& operator=(const iterator&) = default;
1333 
1334     reference operator*() const { return static_cast<Node*>(this->node_)->kv; }
1335     pointer operator->() const { return &(operator*()); }
1336 
1337     iterator& operator++() {
1338       this->PlusPlus();
1339       return *this;
1340     }
1341     iterator operator++(int) {
1342       auto copy = *this;
1343       this->PlusPlus();
1344       return copy;
1345     }
1346 
1347     // Allow implicit conversion to const_iterator.
1348     operator const_iterator() const {  // NOLINT(runtime/explicit)
1349       return const_iterator(static_cast<const BaseIt&>(*this));
1350     }
1351 
1352     friend bool operator==(const iterator& a, const iterator& b) {
1353       return a.Equals(b);
1354     }
1355     friend bool operator!=(const iterator& a, const iterator& b) {
1356       return !a.Equals(b);
1357     }
1358 
1359    private:
1360     using BaseIt::BaseIt;
1361     friend class Map;
1362   };
1363 
1364   iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return iterator(this); }
1365   iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return iterator(); }
1366   const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1367     return const_iterator(this);
1368   }
1369   const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1370     return const_iterator();
1371   }
1372   const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1373     return begin();
1374   }
1375   const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
1376 
1377   using Base::empty;
1378   using Base::size;
1379 
1380   // Element access
1381   template <typename K = key_type>
1382   T& operator[](const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1383     return try_emplace(key).first->second;
1384   }
1385   template <
1386       typename K = key_type,
1387       // Disable for integral types to reduce code bloat.
1388       typename = typename std::enable_if<!std::is_integral<K>::value>::type>
1389   T& operator[](key_arg<K>&& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1390     return try_emplace(std::forward<K>(key)).first->second;
1391   }
1392 
1393   template <typename K = key_type>
1394   const T& at(const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1395     const_iterator it = find(key);
1396     ABSL_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
1397     return it->second;
1398   }
1399 
1400   template <typename K = key_type>
1401   T& at(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1402     iterator it = find(key);
1403     ABSL_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
1404     return it->second;
1405   }
1406 
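  // A minimal usage sketch: operator[] default-constructs a missing value via
  // try_emplace, while at() CHECK-fails on a missing key rather than throwing.
  // The map `m` is hypothetical.
  //
  //   Map<int32_t, std::string> m;
  //   m[5] = "five";             // inserts a default value, then assigns
  //   std::string& s = m.at(5);  // would abort via ABSL_CHECK if 5 were absent
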
1407   // Lookup
1408   template <typename K = key_type>
1409   size_type count(const key_arg<K>& key) const {
1410     return find(key) == end() ? 0 : 1;
1411   }
1412 
1413   template <typename K = key_type>
1414   const_iterator find(const key_arg<K>& key) const
1415       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1416     return const_cast<Map*>(this)->find(key);
1417   }
1418   template <typename K = key_type>
1419   iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1420     auto res = this->FindHelper(TS::ToView(key));
1421     return iterator(static_cast<Node*>(res.node), this, res.bucket);
1422   }
1423 
1424   template <typename K = key_type>
1425   bool contains(const key_arg<K>& key) const {
1426     return find(key) != end();
1427   }
1428 
1429   template <typename K = key_type>
1430   std::pair<const_iterator, const_iterator> equal_range(
1431       const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1432     const_iterator it = find(key);
1433     if (it == end()) {
1434       return std::pair<const_iterator, const_iterator>(it, it);
1435     } else {
1436       const_iterator begin = it++;
1437       return std::pair<const_iterator, const_iterator>(begin, it);
1438     }
1439   }
1440 
1441   template <typename K = key_type>
1442   std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
1443       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1444     iterator it = find(key);
1445     if (it == end()) {
1446       return std::pair<iterator, iterator>(it, it);
1447     } else {
1448       iterator begin = it++;
1449       return std::pair<iterator, iterator>(begin, it);
1450     }
1451   }
1452 
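  // A minimal usage sketch: lookups accept heterogeneous keys through
  // key_arg<K> (e.g. an absl::string_view probed against std::string keys),
  // and because keys are unique, count() only returns 0 or 1 and
  // equal_range() spans at most one element. The map `m` is hypothetical.
  //
  //   Map<std::string, int32_t> m;
  //   m["a"] = 1;
  //   bool present = m.contains(absl::string_view("a"));  // no temporary key
  //   auto it = m.find("a");            // iterator to the entry, or m.end()
  //   auto range = m.equal_range("a");  // [first, second) holds <= 1 element
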
1453   // insert
1454   template <typename K, typename... Args>
1455   std::pair<iterator, bool> try_emplace(K&& k, Args&&... args)
1456       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1457     // Inserts a new element into the container if there is no element with the
1458     // key in the container.
1459     // The new element is:
1460     //  (1) Constructed in-place with the given args, if mapped_type is not
1461     //      arena constructible.
1462     //  (2) Constructed in-place with the arena and then assigned from a
1463     //      mapped_type temporary constructed with the given args, otherwise.
1464     return ArenaAwareTryEmplace(Arena::is_arena_constructable<mapped_type>(),
1465                                 std::forward<K>(k),
1466                                 std::forward<Args>(args)...);
1467   }
1468   std::pair<iterator, bool> insert(init_type&& value)
1469       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1470     return try_emplace(std::move(value.first), std::move(value.second));
1471   }
1472   template <typename P, RequiresInsertable<P> = 0>
1473   std::pair<iterator, bool> insert(P&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1474     return try_emplace(std::forward<P>(value).first,
1475                        std::forward<P>(value).second);
1476   }
1477   template <typename... Args>
1478   std::pair<iterator, bool> emplace(Args&&... args)
1479       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1480     return EmplaceInternal(Rank0{}, std::forward<Args>(args)...);
1481   }
1482   template <class InputIt>
1483   void insert(InputIt first, InputIt last) {
1484     for (; first != last; ++first) {
1485       auto&& pair = *first;
1486       try_emplace(pair.first, pair.second);
1487     }
1488   }
1489   void insert(std::initializer_list<init_type> values) {
1490     insert(values.begin(), values.end());
1491   }
1492   template <typename P, RequiresNotInit<P> = 0,
1493             RequiresInsertable<const P&> = 0>
1494   void insert(std::initializer_list<P> values) {
1495     insert(values.begin(), values.end());
1496   }
1497 
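  // A minimal usage sketch: as with std::unordered_map, insertion returns an
  // {iterator, bool} pair whose bool reports whether a new element was
  // created; an existing mapping is left untouched. The map `m` is
  // hypothetical.
  //
  //   Map<std::string, int32_t> m;
  //   auto [it, inserted] = m.try_emplace("a", 1);  // inserted == true
  //   auto p = m.insert({"a", 2});                  // p.second == false;
  //                                                 // m["a"] remains 1
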
1498   // Erase and clear
1499   template <typename K = key_type>
1500   size_type erase(const key_arg<K>& key) {
1501     iterator it = find(key);
1502     if (it == end()) {
1503       return 0;
1504     } else {
1505       erase(it);
1506       return 1;
1507     }
1508   }
1509 
1510   iterator erase(iterator pos) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1511     auto next = std::next(pos);
1512     ABSL_DCHECK_EQ(pos.m_, static_cast<Base*>(this));
1513     auto* node = static_cast<Node*>(pos.node_);
1514     this->erase_no_destroy(pos.bucket_index_, node);
1515     DestroyNode(node);
1516     return next;
1517   }
1518 
1519   void erase(iterator first, iterator last) {
1520     while (first != last) {
1521       first = erase(first);
1522     }
1523   }
1524 
1525   void clear() {
1526     if (this->num_buckets_ == internal::kGlobalEmptyTableSize) return;
1527     this->ClearTable(this->template MakeClearInput<Node>(true));
1528   }
1529 
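  // A minimal usage sketch: erase(key) reports how many elements (0 or 1)
  // were removed, and erase(iterator) returns the iterator past the removed
  // entry, which supports the usual erase-while-iterating idiom. The map `m`
  // is hypothetical.
  //
  //   for (auto it = m.begin(); it != m.end();) {
  //     if (it->second == 0) {
  //       it = m.erase(it);  // continue from the returned iterator
  //     } else {
  //       ++it;
  //     }
  //   }
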
1530   // Assign
1531   Map& operator=(const Map& other) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1532     if (this != &other) {
1533       clear();
1534       insert(other.begin(), other.end());
1535     }
1536     return *this;
1537   }
1538 
1539   void swap(Map& other) {
1540     if (arena() == other.arena()) {
1541       InternalSwap(&other);
1542     } else {
1543       // TODO: optimize this. The temporary copy can be allocated
1544       // in the same arena as the other message, and the "other = copy" can
1545       // be replaced with the fast-path swap above.
1546       Map copy = *this;
1547       *this = other;
1548       other = copy;
1549     }
1550   }
1551 
1552   void InternalSwap(Map* other) {
1553     internal::UntypedMapBase::InternalSwap(other);
1554   }
1555 
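  // A minimal usage sketch: swap() takes the O(1) InternalSwap path only when
  // both maps share an arena (including both being heap-backed); otherwise it
  // falls back to copying through a temporary, as the code above shows. The
  // maps `a` and `b` are hypothetical.
  //
  //   Map<int32_t, int32_t> a, b;  // same (null) arena
  //   a.swap(b);                   // cheap pointer-level swap
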
1556   hasher hash_function() const { return {}; }
1557 
1558   size_t SpaceUsedExcludingSelfLong() const {
1559     if (empty()) return 0;
1560     return SpaceUsedInternal() + internal::SpaceUsedInValues(this);
1561   }
1562 
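  // Note on the accounting above: an empty map reports 0 because it points at
  // the shared global empty table; otherwise the total covers the bucket
  // table, the nodes, and heap memory owned by the stored values.
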
1563  private:
1564   struct Rank1 {};
1565   struct Rank0 : Rank1 {};
1566 
1567   // Linked-list nodes, as one would expect for a chaining hash table.
1568   struct Node : Base::KeyNode {
1569     using key_type = Key;
1570     using mapped_type = T;
1571     static constexpr internal::MapNodeSizeInfoT size_info() {
1572       return internal::MakeNodeInfo(sizeof(Node),
1573                                     PROTOBUF_FIELD_OFFSET(Node, kv.second));
1574     }
1575     value_type kv;
1576   };
1577 
1578   using Tree = internal::TreeForMap;
1579   using TreeIterator = typename Tree::iterator;
1580   using TableEntryPtr = internal::TableEntryPtr;
1581 
1582   static Node* NodeFromTreeIterator(TreeIterator it) {
1583     static_assert(
1584         PROTOBUF_FIELD_OFFSET(Node, kv.first) == Base::KeyNode::kOffset, "");
1585     static_assert(alignof(Node) == alignof(internal::NodeBase), "");
1586     return static_cast<Node*>(it->second);
1587   }
1588 
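  // Destroys the key/value pair and returns the node's memory, but only for
  // heap-backed maps; nodes on an arena are reclaimed in bulk when the arena
  // itself is destroyed, so per-node teardown is skipped.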
1589   void DestroyNode(Node* node) {
1590     if (this->alloc_.arena() == nullptr) {
1591       node->kv.first.~key_type();
1592       node->kv.second.~mapped_type();
1593       this->DeallocNode(node, sizeof(Node));
1594     }
1595   }
1596 
1597   size_t SpaceUsedInternal() const {
1598     return this->SpaceUsedInTable(sizeof(Node));
1599   }
1600 
1601   // We try to construct `init_type` from `Args`, falling back to
1602   // `value_type`. The latter is less desirable, as it unconditionally makes
1603   // a copy of `value_type::first`.
1604   template <typename... Args>
1605   auto EmplaceInternal(Rank0, Args&&... args) ->
1606       typename std::enable_if<std::is_constructible<init_type, Args...>::value,
1607                               std::pair<iterator, bool>>::type {
1608     return insert(init_type(std::forward<Args>(args)...));
1609   }
1610   template <typename... Args>
1611   std::pair<iterator, bool> EmplaceInternal(Rank1, Args&&... args) {
1612     return insert(value_type(std::forward<Args>(args)...));
1613   }
1614 
1615   template <typename K, typename... Args>
1616   std::pair<iterator, bool> TryEmplaceInternal(K&& k, Args&&... args) {
1617     auto p = this->FindHelper(TS::ToView(k));
1618     internal::map_index_t b = p.bucket;
1619     // Case 1: key was already present.
1620     if (p.node != nullptr)
1621       return std::make_pair(
1622           iterator(static_cast<Node*>(p.node), this, p.bucket), false);
1623     // Case 2: insert.
1624     if (this->ResizeIfLoadIsOutOfRange(this->num_elements_ + 1)) {
1625       b = this->BucketNumber(TS::ToView(k));
1626     }
1627     // If K is not key_type, make the conversion to key_type explicit.
1628     using TypeToInit = typename std::conditional<
1629         std::is_same<typename std::decay<K>::type, key_type>::value, K&&,
1630         key_type>::type;
1631     Node* node = static_cast<Node*>(this->AllocNode(sizeof(Node)));
1632 
1633     // Even when the arena is nullptr, CreateInArenaStorage is still used to
1634     // ensure that the submessage's arena is consistent. Otherwise, the
1635     // submessage may have its own arena when message-owned arenas are enabled.
1636     // Note: This only works if `Key` is not arena constructible.
1637     if (!internal::InitializeMapKey(const_cast<Key*>(&node->kv.first),
1638                                     std::forward<K>(k), this->alloc_.arena())) {
1639       Arena::CreateInArenaStorage(const_cast<Key*>(&node->kv.first),
1640                                   this->alloc_.arena(),
1641                                   static_cast<TypeToInit>(std::forward<K>(k)));
1642     }
1643     // Note: if `T` is arena constructible, `Args` needs to be empty.
1644     Arena::CreateInArenaStorage(&node->kv.second, this->alloc_.arena(),
1645                                 std::forward<Args>(args)...);
1646 
1647     this->InsertUnique(b, node);
1648     ++this->num_elements_;
1649     return std::make_pair(iterator(node, this, b), true);
1650   }
1651 
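  // A standalone sketch of the `TypeToInit` trick above (with std::string
  // standing in for key_type, purely hypothetically): if the caller's `K`
  // already decays to the key type, forward it untouched; otherwise force one
  // explicit conversion to the key type at the cast site.
  //
  //   template <typename K>
  //   void InitKeyDemo(K&& k, void* storage) {
  //     using TypeToInit = typename std::conditional<
  //         std::is_same<typename std::decay<K>::type, std::string>::value,
  //         K&&, std::string>::type;
  //     ::new (storage)
  //         std::string(static_cast<TypeToInit>(std::forward<K>(k)));
  //   }
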
1652   // A helper function to perform an assignment of `mapped_type`.
1653   // If the first argument is `std::true_type`, it is a regular assignment.
1654   // Otherwise, we first create a temporary and then perform an assignment.
1655   template <typename V>
1656   static void AssignMapped(std::true_type, mapped_type& mapped, V&& v) {
1657     mapped = std::forward<V>(v);
1658   }
1659   template <typename... Args>
1660   static void AssignMapped(std::false_type, mapped_type& mapped,
1661                            Args&&... args) {
1662     mapped = mapped_type(std::forward<Args>(args)...);
1663   }
1664 
1665   // Case 1: `mapped_type` is arena constructible. A temporary object is
1666   // created and then (if `Args` are not empty) assigned to a mapped value
1667   // that was created with the arena.
1668   template <typename K>
1669   std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k) {
1670     // case 1.1: "default" constructed (e.g. from arena only).
1671     return TryEmplaceInternal(std::forward<K>(k));
1672   }
1673   template <typename K, typename... Args>
1674   std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k,
1675                                                  Args&&... args) {
1676     // case 1.2: "default" constructed + copy/move assignment
1677     auto p = TryEmplaceInternal(std::forward<K>(k));
1678     if (p.second) {
1679       AssignMapped(std::is_same<void(typename std::decay<Args>::type...),
1680                                 void(mapped_type)>(),
1681                    p.first->second, std::forward<Args>(args)...);
1682     }
1683     return p;
1684   }
1685   // Case 2: `mapped_type` is not arena constructible, so the value is
1686   // constructed in place directly from `args`.
1687   template <typename... Args>
1688   std::pair<iterator, bool> ArenaAwareTryEmplace(std::false_type,
1689                                                  Args&&... args) {
1690     return TryEmplaceInternal(std::forward<Args>(args)...);
1691   }
1692 
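  // The overload set above is classic tag dispatch: the compile-time value
  // Arena::is_arena_constructable<mapped_type>() (an integral-constant type)
  // selects an overload at the try_emplace call site, so only the matching
  // branch is ever instantiated. A minimal analogue, with hypothetical names:
  //
  //   void CopyImpl(std::true_type) { /* trivially-copyable fast path */ }
  //   void CopyImpl(std::false_type) { /* element-by-element path */ }
  //   template <typename T>
  //   void Copy() { CopyImpl(std::is_trivially_copyable<T>()); }
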
1693   using Base::arena;
1694 
1695   friend class Arena;
1696   template <typename, typename>
1697   friend class internal::TypeDefinedMapFieldBase;
1698   using InternalArenaConstructable_ = void;
1699   using DestructorSkippable_ = void;
1700   template <typename K, typename V>
1701   friend class internal::MapFieldLite;
1702   friend class internal::TcParser;
1703   friend struct internal::MapTestPeer;
1704   friend struct internal::MapBenchmarkPeer;
1705 };
1706 
1707 namespace internal {
1708 template <typename... T>
1709 PROTOBUF_NOINLINE void MapMergeFrom(Map<T...>& dest, const Map<T...>& src) {
1710   for (const auto& elem : src) {
1711     dest[elem.first] = elem.second;
1712   }
1713 }
1714 }  // namespace internal
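// A minimal usage sketch: MapMergeFrom writes through operator[], so on a key
// collision the source's value overwrites the destination's, matching
// protobuf's last-wins merge semantics for map fields. `dest` and `src` are
// hypothetical.
//
//   Map<int32_t, int32_t> dest, src;
//   dest[1] = 10;
//   src[1] = 20;
//   internal::MapMergeFrom(dest, src);  // dest[1] == 20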
1715 
1716 }  // namespace protobuf
1717 }  // namespace google
1718 
1719 #include "google/protobuf/port_undef.inc"
1720 
1721 #endif  // GOOGLE_PROTOBUF_MAP_H__