Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:13:54

0001 //                 ______  _____                 ______                _________
0002 //  ______________ ___  /_ ___(_)_______         ___  /_ ______ ______ ______  /
0003 //  __  ___/_  __ \__  __ \__  / __  __ \        __  __ \_  __ \_  __ \_  __  /
0004 //  _  /    / /_/ /_  /_/ /_  /  _  / / /        _  / / // /_/ // /_/ // /_/ /
0005 //  /_/     \____/ /_.___/ /_/   /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/
0006 //                                      _/_____/
0007 //
0008 // Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20
0009 // version 3.4.1
0010 // https://github.com/martinus/robin-hood-hashing
0011 //
0012 // Licensed under the MIT License <http://opensource.org/licenses/MIT>.
0013 // SPDX-License-Identifier: MIT
0014 // Copyright (c) 2018-2019 Martin Ankerl <http://martin.ankerl.com>
0015 //
0016 // Permission is hereby granted, free of charge, to any person obtaining a copy
0017 // of this software and associated documentation files (the "Software"), to deal
0018 // in the Software without restriction, including without limitation the rights
0019 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
0020 // copies of the Software, and to permit persons to whom the Software is
0021 // furnished to do so, subject to the following conditions:
0022 //
0023 // The above copyright notice and this permission notice shall be included in all
0024 // copies or substantial portions of the Software.
0025 //
0026 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0027 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0028 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
0029 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0030 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
0031 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0032 // SOFTWARE.
0033 
0034 #ifndef ROBIN_HOOD_H_INCLUDED
0035 #define ROBIN_HOOD_H_INCLUDED
0036 
0037 // see https://semver.org/
0038 #define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes
0039 #define ROBIN_HOOD_VERSION_MINOR 4 // for adding functionality in a backwards-compatible manner
0040 #define ROBIN_HOOD_VERSION_PATCH 1 // for backwards-compatible bug fixes
0041 
0042 #include <algorithm>
0043 #include <cstdlib>
0044 #include <cstring>
0045 #include <functional>
0046 #include <stdexcept>
0047 #include <string>
0048 #include <type_traits>
0049 #include <utility>
0050 
0051 // #define ROBIN_HOOD_LOG_ENABLED
0052 #ifdef ROBIN_HOOD_LOG_ENABLED
0053 #include <iostream>
0054 #define ROBIN_HOOD_LOG(x) std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl
0055 #else
0056 #define ROBIN_HOOD_LOG(x)
0057 #endif
0058 
0059 // #define ROBIN_HOOD_TRACE_ENABLED
0060 #ifdef ROBIN_HOOD_TRACE_ENABLED
0061 #include <iostream>
0062 #define ROBIN_HOOD_TRACE(x) std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl
0063 #else
0064 #define ROBIN_HOOD_TRACE(x)
0065 #endif
0066 
0067 // all non-argument macros should use this facility. See
0068 // https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/
0069 #define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x()
0070 
0071 // mark unused members with this macro
0072 #define ROBIN_HOOD_UNUSED(identifier)
0073 
0074 // bitness
0075 #if SIZE_MAX == UINT32_MAX
0076 #define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32
0077 #elif SIZE_MAX == UINT64_MAX
0078 #define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64
0079 #else
0080 #error Unsupported bitness
0081 #endif
0082 
0083 // endianess
0084 #ifdef _WIN32
0085 #define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1
0086 #define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0
0087 #else
0088 #define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
0089 #define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
0090 #endif
0091 
0092 // inline
0093 #ifdef _WIN32
0094 #define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline)
0095 #else
0096 #define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline))
0097 #endif
0098 
0099 // exceptions
0100 #if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND)
0101 #define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0
0102 #else
0103 #define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1
0104 #endif
0105 
0106 // count leading/trailing bits
0107 #ifdef _WIN32
0108 #if ROBIN_HOOD(BITNESS) == 32
0109 #define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
0110 #else
0111 #define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
0112 #endif
0113 #include <intrin.h>
0114 #pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
0115 #define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x)                                                          \
0116   [](size_t mask) noexcept->int                                                                      \
0117   {                                                                                                  \
0118     unsigned long index;                                                                             \
0119     return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) : ROBIN_HOOD(BITNESS); \
0120   }                                                                                                  \
0121   (x)
0122 #else
0123 #if ROBIN_HOOD(BITNESS) == 32
0124 #define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
0125 #define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
0126 #else
0127 #define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
0128 #define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
0129 #endif
0130 #define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
0131 #define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
0132 #endif
0133 
0134 // fallthrough
0135 #ifndef __has_cpp_attribute // For backwards compatibility
0136 #define __has_cpp_attribute(x) 0
0137 #endif
0138 #if __has_cpp_attribute(clang::fallthrough) && !defined(__INTEL_COMPILER)
0139 #define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]]
0140 #elif __has_cpp_attribute(gnu::fallthrough)
0141 #define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]]
0142 #else
0143 #define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH()
0144 #endif
0145 
0146 // likely/unlikely
0147 #if defined(_WIN32)
0148 #define ROBIN_HOOD_LIKELY(condition) condition
0149 #define ROBIN_HOOD_UNLIKELY(condition) condition
0150 #else
0151 #define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1)
0152 #define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0)
0153 #endif
0154 
0155 #define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
0156 
0157 // helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html
0158 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus
0159 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L
0160 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L
0161 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L
0162 #define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L
0163 
0164 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17)
0165 #define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]]
0166 #else
0167 #define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD()
0168 #endif
0169 
0170 namespace robin_hood {
0171 
0172 #if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
0173 #define ROBIN_HOOD_STD std
0174 #else
0175 
0176 // c++11 compatibility layer
0177 namespace ROBIN_HOOD_STD {
0178 template <class T>
0179 struct alignment_of : std::integral_constant<std::size_t, alignof(typename std::remove_all_extents<T>::type)> {
0180 };
0181 
0182 template <class T, T... Ints>
0183 class integer_sequence {
0184 public:
0185   using value_type = T;
0186   static_assert(std::is_integral<value_type>::value, "not integral type");
0187   static constexpr std::size_t size() noexcept { return sizeof...(Ints); }
0188 };
0189 template <std::size_t... Inds>
0190 using index_sequence = integer_sequence<std::size_t, Inds...>;
0191 
0192 namespace detail_ {
0193 template <class T, T Begin, T End, bool>
0194 struct IntSeqImpl {
0195   using TValue = T;
0196   static_assert(std::is_integral<TValue>::value, "not integral type");
0197   static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)");
0198 
0199   template <class, class>
0200   struct IntSeqCombiner;
0201 
0202   template <TValue... Inds0, TValue... Inds1>
0203   struct IntSeqCombiner<integer_sequence<TValue, Inds0...>, integer_sequence<TValue, Inds1...>> {
0204     using TResult = integer_sequence<TValue, Inds0..., Inds1...>;
0205   };
0206 
0207   using TResult = typename IntSeqCombiner<
0208       typename IntSeqImpl<TValue, Begin, Begin + (End - Begin) / 2, (End - Begin) / 2 == 1>::TResult,
0209       typename IntSeqImpl<TValue, Begin + (End - Begin) / 2, End, (End - Begin + 1) / 2 == 1>::TResult>::TResult;
0210 };
0211 
0212 template <class T, T Begin>
0213 struct IntSeqImpl<T, Begin, Begin, false> {
0214   using TValue = T;
0215   static_assert(std::is_integral<TValue>::value, "not integral type");
0216   static_assert(Begin >= 0, "unexpected argument (Begin<0)");
0217   using TResult = integer_sequence<TValue>;
0218 };
0219 
0220 template <class T, T Begin, T End>
0221 struct IntSeqImpl<T, Begin, End, true> {
0222   using TValue = T;
0223   static_assert(std::is_integral<TValue>::value, "not integral type");
0224   static_assert(Begin >= 0, "unexpected argument (Begin<0)");
0225   using TResult = integer_sequence<TValue, Begin>;
0226 };
0227 } // namespace detail_
0228 
0229 template <class T, T N>
0230 using make_integer_sequence = typename detail_::IntSeqImpl<T, 0, N, (N - 0) == 1>::TResult;
0231 
0232 template <std::size_t N>
0233 using make_index_sequence = make_integer_sequence<std::size_t, N>;
0234 
0235 template <class... T>
0236 using index_sequence_for = make_index_sequence<sizeof...(T)>;
0237 
0238 } // namespace ROBIN_HOOD_STD
0239 
0240 #endif
0241 
0242 namespace detail {
0243 
0244 // umul
0245 #if defined(__SIZEOF_INT128__)
0246 #define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_UMUL128() 1
0247 #if defined(__GNUC__) || defined(__clang__)
0248 #pragma GCC diagnostic push
0249 #pragma GCC diagnostic ignored "-Wpedantic"
0250 using uint128_t = unsigned __int128;
0251 #pragma GCC diagnostic pop
0252 #endif
// 64x64 -> 128 bit multiply via the compiler-provided unsigned __int128.
// Returns the low 64 bits of the product; the high 64 bits are written to *high.
inline uint64_t umul128(uint64_t a, uint64_t b, uint64_t *high) noexcept
{
  auto result = static_cast<uint128_t>(a) * static_cast<uint128_t>(b);
  *high       = static_cast<uint64_t>(result >> 64U);
  return static_cast<uint64_t>(result);
}
0259 #elif (defined(_WIN32) && ROBIN_HOOD(BITNESS) == 64)
0260 #define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_UMUL128() 1
0261 #include <intrin.h> // for __umulh
0262 #pragma intrinsic(__umulh)
0263 #ifndef _M_ARM64
0264 #pragma intrinsic(_umul128)
0265 #endif
// 64x64 -> 128 bit multiply via MSVC intrinsics (64 bit Windows).
// Returns the low 64 bits of the product; the high 64 bits are written to *high.
inline uint64_t umul128(uint64_t a, uint64_t b, uint64_t *high) noexcept
{
#ifdef _M_ARM64
  // ARM64 has no _umul128 intrinsic: __umulh yields the high half, and the
  // plain (truncating) multiplication yields the low half.
  *high = __umulh(a, b);
  return ((uint64_t)(a)) * (b);
#else
  return _umul128(a, b, high);
#endif
}
0275 #else
0276 #define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_UMUL128() 0
0277 #endif
0278 
// This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to
// 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with
// care! The caller is responsible for ensuring the pointee is actually suitably
// aligned (or only accessed through alignment-safe means such as memcpy).
template <typename T>
inline T reinterpret_cast_no_cast_align_warning(void *ptr) noexcept
{
  return reinterpret_cast<T>(ptr);
}

// const overload of the above; same caveats apply.
template <typename T>
inline T reinterpret_cast_no_cast_align_warning(void const *ptr) noexcept
{
  return reinterpret_cast<T>(ptr);
}
0293 
// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
// inlinings more difficult. Throws are also generally the slow path.
// Throws an exception of type E constructed from the forwarded args; when the
// translation unit is built without exception support, aborts instead.
template <typename E, typename... Args>
ROBIN_HOOD(NOINLINE)
#if ROBIN_HOOD(HAS_EXCEPTIONS)
void doThrow(Args &&... args)
{
  // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
  throw E(std::forward<Args>(args)...);
}
#else
// exceptions disabled: terminating is the only remaining option
void doThrow(Args &&... ROBIN_HOOD_UNUSED(args) /*unused*/)
{
  abort();
}
#endif
0310 
0311 template <typename E, typename T, typename... Args>
0312 T *assertNotNull(T *t, Args &&... args)
0313 {
0314   if (ROBIN_HOOD_UNLIKELY(nullptr == t)) {
0315     doThrow<E>(std::forward<Args>(args)...);
0316   }
0317   return t;
0318 }
0319 
// Loads a T from a possibly misaligned address. memcpy is used instead of a
// direct pointer dereference so the read is well defined even on platforms
// with strict alignment requirements; compilers optimize this into a plain
// load anyway.
template <typename T>
inline T unaligned_load(void const *ptr) noexcept
{
  T value;
  std::memcpy(&value, ptr, sizeof(T));
  return value;
}
0329 
// Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor,
// and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a
// pointer.
//
// Free-list layout: mHead points to the first free element, and the first sizeof(T*) bytes of
// each free element store the pointer to the next free element. mListForFree chains the raw
// malloc'ed blocks (the first slot of each block stores the pointer to the previously allocated
// block) so that reset() can walk and free() them all.
template <typename T, size_t MinNumAllocs = 4, size_t MaxNumAllocs = 256>
class BulkPoolAllocator {
public:
  BulkPoolAllocator() noexcept = default;

  // does not copy anything, just creates a new allocator.
  BulkPoolAllocator(const BulkPoolAllocator &ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept
      : mHead(nullptr), mListForFree(nullptr)
  {
  }

  // move: steal the source's free list and block list, leaving it empty.
  BulkPoolAllocator(BulkPoolAllocator &&o) noexcept : mHead(o.mHead), mListForFree(o.mListForFree)
  {
    o.mListForFree = nullptr;
    o.mHead        = nullptr;
  }

  // move-assign: release our own memory first, then take over the source's.
  BulkPoolAllocator &operator=(BulkPoolAllocator &&o) noexcept
  {
    reset();
    mHead          = o.mHead;
    mListForFree   = o.mListForFree;
    o.mListForFree = nullptr;
    o.mHead        = nullptr;
    return *this;
  }

  BulkPoolAllocator &operator=(const BulkPoolAllocator &ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept
  {
    // does not do anything
    return *this;
  }

  ~BulkPoolAllocator() noexcept { reset(); }

  // Deallocates all allocated memory.
  void reset() noexcept
  {
    while (mListForFree) {
      // the first slot of each block stores the pointer to the next block to free
      T *tmp = *mListForFree;
      free(mListForFree);
      mListForFree = reinterpret_cast_no_cast_align_warning<T **>(tmp);
    }
    mHead = nullptr;
  }

  // allocates, but does NOT initialize. Use in-place new constructor, e.g.
  //   T* obj = pool.allocate();
  //   ::new (static_cast<void*>(obj)) T();
  T *allocate()
  {
    T *tmp = mHead;
    if (!tmp) {
      // free list exhausted: grab a fresh block (slow path, deliberately not inlined)
      tmp = performAllocation();
    }

    // pop the first free element: its in-place stored pointer becomes the new head
    mHead = *reinterpret_cast_no_cast_align_warning<T **>(tmp);
    return tmp;
  }

  // does not actually deallocate but puts it in store.
  // make sure you have already called the destructor! e.g. with
  //  obj->~T();
  //  pool.deallocate(obj);
  void deallocate(T *obj) noexcept
  {
    *reinterpret_cast_no_cast_align_warning<T **>(obj) = mHead;
    mHead                                              = obj;
  }

  // Adds an already allocated block of memory to the allocator. This allocator is from now on
  // responsible for freeing the data (with free()). If the provided data is not large enough to
  // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor.
  void addOrFree(void *ptr, const size_t numBytes) noexcept
  {
    // calculate number of available elements in ptr
    if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
      // not enough data for at least one element. Free and return.
      free(ptr);
    } else {
      add(ptr, numBytes);
    }
  }

  void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs> &other) noexcept
  {
    using std::swap;
    swap(mHead, other.mHead);
    swap(mListForFree, other.mListForFree);
  }

private:
  // iterates the list of allocated memory to calculate how many to alloc next.
  // Recalculating this each time saves us a size_t member.
  // This ignores the fact that memory blocks might have been added manually with addOrFree. In
  // practice, this should not matter much.
  ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept
  {
    auto tmp         = mListForFree;
    size_t numAllocs = MinNumAllocs;

    // double the request for each block already in the chain, capped at MaxNumAllocs
    while (numAllocs * 2 <= MaxNumAllocs && tmp) {
      auto x = reinterpret_cast<T ***>(tmp);
      tmp    = *x;
      numAllocs *= 2;
    }

    return numAllocs;
  }

  // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree().
  void add(void *ptr, const size_t numBytes) noexcept
  {
    const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE;

    auto data = reinterpret_cast<T **>(ptr);

    // link free list
    auto x       = reinterpret_cast<T ***>(data);
    *x           = mListForFree;
    mListForFree = data;

    // create linked list for newly allocated data
    auto const headT = reinterpret_cast_no_cast_align_warning<T *>(reinterpret_cast<char *>(ptr) + ALIGNMENT);

    auto const head = reinterpret_cast<char *>(headT);

    // Visual Studio compiler automatically unrolls this loop, which is pretty cool
    for (size_t i = 0; i < numElements; ++i) {
      *reinterpret_cast_no_cast_align_warning<char **>(head + i * ALIGNED_SIZE) = head + (i + 1) * ALIGNED_SIZE;
    }

    // last one points to 0 (the loop's final write is overwritten here with the old head)
    *reinterpret_cast_no_cast_align_warning<T **>(head + (numElements - 1) * ALIGNED_SIZE) = mHead;
    mHead                                                                                  = headT;
  }

  // Called when no memory is available (mHead == 0).
  // Don't inline this slow path.
  ROBIN_HOOD(NOINLINE) T *performAllocation()
  {
    size_t const numElementsToAlloc = calcNumElementsToAlloc();

    // alloc new memory: [prev |T, T, ... T]
    // std::cout << (sizeof(T*) + ALIGNED_SIZE * numElementsToAlloc) << " bytes" << std::endl;
    size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
    add(assertNotNull<std::bad_alloc>(malloc(bytes)), bytes);
    return mHead;
  }

  // enforce byte alignment of the T's
#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
  static constexpr size_t ALIGNMENT = (std::max)(std::alignment_of<T>::value, std::alignment_of<T *>::value);
#else
  static const size_t ALIGNMENT = (ROBIN_HOOD_STD::alignment_of<T>::value > ROBIN_HOOD_STD::alignment_of<T *>::value)
                                      ? ROBIN_HOOD_STD::alignment_of<T>::value
                                      : +ROBIN_HOOD_STD::alignment_of<T *>::value; // the + is for a workaround
#endif

  static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT;

  static_assert(MinNumAllocs >= 1, "MinNumAllocs");
  static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs");
  static_assert(ALIGNED_SIZE >= sizeof(T *), "ALIGNED_SIZE");
  static_assert(0 == (ALIGNED_SIZE % sizeof(T *)), "ALIGNED_SIZE mod");
  static_assert(ALIGNMENT >= sizeof(T *), "ALIGNMENT");

  T *mHead{nullptr};         // first free element; its first bytes store the next-free pointer
  T **mListForFree{nullptr}; // head of the chain of malloc'ed blocks, walked by reset()
};
0503 
// Allocator selected by the map flavor via IsFlatMap. The flat variant stores
// its pairs inline and never pools nodes; the node-based variant pools them
// through BulkPoolAllocator.
template <typename T, size_t MinSize, size_t MaxSize, bool IsFlatMap>
struct NodeAllocator;

// dummy allocator that does nothing
template <typename T, size_t MinSize, size_t MaxSize>
struct NodeAllocator<T, MinSize, MaxSize, true> {

  // we are not using the data, so just free it.
  void addOrFree(void *ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept { free(ptr); }
};

// node-based variant: inherit the pooling behavior of BulkPoolAllocator.
template <typename T, size_t MinSize, size_t MaxSize>
struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {
};
0518 
// dummy hash, used as a mixer when robin_hood::hash is already used: simply
// converts the value to size_t without any further mixing.
template <typename T>
struct identity_hash {
  constexpr size_t operator()(T const &obj) const noexcept
  {
    return static_cast<size_t>(obj);
  }
};
0524 
// c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making
// my own here.
namespace swappable {
using std::swap;
template <typename T>
struct nothrow {
  // true when swapping two T lvalues (found via ADL, with std::swap as the
  // fallback pulled in by the using-declaration above) cannot throw.
  static const bool value = noexcept(swap(std::declval<T &>(), std::declval<T &>()));
};

} // namespace swappable
0535 
0536 } // namespace detail
0537 
// Empty tag type. NOTE(review): presumably used to select heterogeneous
// ("transparent") lookup overloads further down — confirm against the map's
// find()/count() overload set, which is outside this view.
struct is_transparent_tag {
};
0540 
// A custom pair implementation is used in the map because std::pair is not is_trivially_copyable,
// which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is
// also tested.
template <typename T1, typename T2>
struct pair {
  using first_type  = T1;
  using second_type = T2;

  // default ctor; participates in overload resolution only when both members
  // are default constructible
  template <typename U1 = T1, typename U2 = T2,
            typename = typename std::enable_if<std::is_default_constructible<U1>::value &&
                                               std::is_default_constructible<U2>::value>::type>
  constexpr pair() noexcept(noexcept(U1()) && noexcept(U2())) : first(), second()
  {
  }

  // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
  explicit constexpr pair(std::pair<T1, T2> const &o) noexcept(noexcept(T1(std::declval<T1 const &>())) &&
                                                               noexcept(T2(std::declval<T2 const &>())))
      : first(o.first), second(o.second)
  {
  }

  // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
  explicit constexpr pair(std::pair<T1, T2> &&o) noexcept(noexcept(T1(std::move(std::declval<T1 &&>()))) &&
                                                          noexcept(T2(std::move(std::declval<T2 &&>()))))
      : first(std::move(o.first)), second(std::move(o.second))
  {
  }

  // move both members into place
  constexpr pair(T1 &&a, T2 &&b) noexcept(noexcept(T1(std::move(std::declval<T1 &&>()))) &&
                                          noexcept(T2(std::move(std::declval<T2 &&>()))))
      : first(std::move(a)), second(std::move(b))
  {
  }

  // perfect-forwarding ctor for arguments convertible to T1/T2
  template <typename U1, typename U2>
  constexpr pair(U1 &&a, U2 &&b) noexcept(noexcept(T1(std::forward<U1>(std::declval<U1 &&>()))) &&
                                          noexcept(T2(std::forward<U2>(std::declval<U2 &&>()))))
      : first(std::forward<U1>(a)), second(std::forward<U2>(b))
  {
  }

  // piecewise construction: each member is built from its own argument tuple,
  // delegating to the index_sequence ctor below
  template <typename... U1, typename... U2>
  constexpr pair(std::piecewise_construct_t /*unused*/, std::tuple<U1...> a, std::tuple<U2...> b) noexcept(
      noexcept(pair(std::declval<std::tuple<U1...> &>(), std::declval<std::tuple<U2...> &>(),
                    ROBIN_HOOD_STD::index_sequence_for<U1...>(), ROBIN_HOOD_STD::index_sequence_for<U2...>())))
      : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(), ROBIN_HOOD_STD::index_sequence_for<U2...>())
  {
  }

  // constructor called from the std::piecewise_construct_t ctor
  template <typename... U1, size_t... I1, typename... U2, size_t... I2>
  pair(std::tuple<U1...> &a, std::tuple<U2...> &b, ROBIN_HOOD_STD::index_sequence<I1...> /*unused*/,
       ROBIN_HOOD_STD::index_sequence<
           I2...> /*unused*/) noexcept(noexcept(T1(std::forward<U1>(std::get<I1>(std::declval<std::tuple<U1...>
                                                                                                  &>()))...)) &&
                                       noexcept(
                                           T2(std::forward<U2>(std::get<I2>(std::declval<std::tuple<U2...> &>()))...)))
      : first(std::forward<U1>(std::get<I1>(a))...), second(std::forward<U2>(std::get<I2>(b))...)
  {
    // make visual studio compiler happy about warning about unused a & b.
    // Visual studio's pair implementation disables warning 4100.
    (void)a;
    (void)b;
  }

  // accessors used internally by the map instead of touching first/second directly
  ROBIN_HOOD(NODISCARD) first_type &getFirst() noexcept { return first; }
  ROBIN_HOOD(NODISCARD) first_type const &getFirst() const noexcept { return first; }
  ROBIN_HOOD(NODISCARD) second_type &getSecond() noexcept { return second; }
  ROBIN_HOOD(NODISCARD) second_type const &getSecond() const noexcept { return second; }

  // member-wise swap; noexcept exactly when both member swaps are noexcept
  void swap(pair<T1, T2> &o) noexcept((detail::swappable::nothrow<T1>::value) &&
                                      (detail::swappable::nothrow<T2>::value))
  {
    using std::swap;
    swap(first, o.first);
    swap(second, o.second);
  }

  T1 first;  // NOLINT(misc-non-private-member-variables-in-classes)
  T2 second; // NOLINT(misc-non-private-member-variables-in-classes)
};
0623 
0624 template <typename A, typename B>
0625 void swap(pair<A, B> &a,
0626           pair<A, B> &b) noexcept(noexcept(std::declval<pair<A, B> &>().swap(std::declval<pair<A, B> &>())))
0627 {
0628   a.swap(b);
0629 }
0630 
0631 // Hash an arbitrary amount of bytes. This is basically Murmur2 hash without caring about big
0632 // endianness. TODO(martinus) add a fallback for very large strings?
0633 static size_t hash_bytes(void const *ptr, size_t const len) noexcept
0634 {
0635   static constexpr uint64_t m     = UINT64_C(0xc6a4a7935bd1e995);
0636   static constexpr uint64_t seed  = UINT64_C(0xe17a1465);
0637   static constexpr unsigned int r = 47;
0638 
0639   auto const data64 = static_cast<uint64_t const *>(ptr);
0640   uint64_t h        = seed ^ (len * m);
0641 
0642   size_t const n_blocks = len / 8;
0643   for (size_t i = 0; i < n_blocks; ++i) {
0644     auto k = detail::unaligned_load<uint64_t>(data64 + i);
0645 
0646     k *= m;
0647     k ^= k >> r;
0648     k *= m;
0649 
0650     h ^= k;
0651     h *= m;
0652   }
0653 
0654   auto const data8 = reinterpret_cast<uint8_t const *>(data64 + n_blocks);
0655   switch (len & 7U) {
0656   case 7:
0657     h ^= static_cast<uint64_t>(data8[6]) << 48U;
0658     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0659   case 6:
0660     h ^= static_cast<uint64_t>(data8[5]) << 40U;
0661     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0662   case 5:
0663     h ^= static_cast<uint64_t>(data8[4]) << 32U;
0664     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0665   case 4:
0666     h ^= static_cast<uint64_t>(data8[3]) << 24U;
0667     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0668   case 3:
0669     h ^= static_cast<uint64_t>(data8[2]) << 16U;
0670     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0671   case 2:
0672     h ^= static_cast<uint64_t>(data8[1]) << 8U;
0673     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0674   case 1:
0675     h ^= static_cast<uint64_t>(data8[0]);
0676     h *= m;
0677     ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
0678   default:
0679     break;
0680   }
0681 
0682   h ^= h >> r;
0683   h *= m;
0684   h ^= h >> r;
0685   return static_cast<size_t>(h);
0686 }
0687 
// Mixes a 64 bit value into a size_t hash. Three implementations, selected at
// preprocessor time depending on platform capabilities.
inline size_t hash_int(uint64_t obj) noexcept
{
#if ROBIN_HOOD(HAS_UMUL128)
  // full 64x64 -> 128 bit multiply, then fold high and low halves together
  // 167079903232 masksum, 120428523 ops best: 0xde5fb9d2630458e9
  static constexpr uint64_t k = UINT64_C(0xde5fb9d2630458e9);
  uint64_t h;
  uint64_t l = detail::umul128(obj, k, &h);
  return h + l;
#elif ROBIN_HOOD(BITNESS) == 32
  // 32 bit: fold the two halves of the 64 bit product into a 32 bit result
  uint64_t const r = obj * UINT64_C(0xca4bcaa75ec3f625);
  auto h           = static_cast<uint32_t>(r >> 32U);
  auto l           = static_cast<uint32_t>(r);
  return h + l;
#else
  // murmurhash 3 finalizer
  uint64_t h = obj;
  h ^= h >> 33;
  h *= 0xff51afd7ed558ccd;
  h ^= h >> 33;
  h *= 0xc4ceb9fe1a85ec53;
  h ^= h >> 33;
  return static_cast<size_t>(h);
#endif
}
0712 
0713 // A thin wrapper around std::hash, performing an additional simple mixing step of the result.
0714 template <typename T>
0715 struct hash : public std::hash<T> {
0716   size_t operator()(T const &obj) const
0717       noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const &>())))
0718   {
0719     // call base hash
0720     auto result = std::hash<T>::operator()(obj);
0721     // return mixed of that, to be save against identity has
0722     return hash_int(static_cast<uint64_t>(result));
0723   }
0724 };
0725 
0726 template <>
0727 struct hash<std::string> {
0728   size_t operator()(std::string const &str) const noexcept { return hash_bytes(str.data(), str.size()); }
0729 };
0730 
0731 template <class T>
0732 struct hash<T *> {
0733   size_t operator()(T *ptr) const noexcept { return hash_int(reinterpret_cast<size_t>(ptr)); }
0734 };
0735 
0736 #define ROBIN_HOOD_HASH_INT(T)                                                               \
0737   template <>                                                                                \
0738   struct hash<T> {                                                                           \
0739     size_t operator()(T obj) const noexcept { return hash_int(static_cast<uint64_t>(obj)); } \
0740   }
0741 
0742 #if defined(__GNUC__) && !defined(__clang__)
0743 #pragma GCC diagnostic push
0744 #pragma GCC diagnostic ignored "-Wuseless-cast"
0745 #endif
0746 // see https://en.cppreference.com/w/cpp/utility/hash
0747 ROBIN_HOOD_HASH_INT(bool);
0748 ROBIN_HOOD_HASH_INT(char);
0749 ROBIN_HOOD_HASH_INT(signed char);
0750 ROBIN_HOOD_HASH_INT(unsigned char);
0751 ROBIN_HOOD_HASH_INT(char16_t);
0752 ROBIN_HOOD_HASH_INT(char32_t);
0753 ROBIN_HOOD_HASH_INT(wchar_t);
0754 ROBIN_HOOD_HASH_INT(short);
0755 ROBIN_HOOD_HASH_INT(unsigned short);
0756 ROBIN_HOOD_HASH_INT(int);
0757 ROBIN_HOOD_HASH_INT(unsigned int);
0758 ROBIN_HOOD_HASH_INT(long);
0759 ROBIN_HOOD_HASH_INT(long long);
0760 ROBIN_HOOD_HASH_INT(unsigned long);
0761 ROBIN_HOOD_HASH_INT(unsigned long long);
0762 #if defined(__GNUC__) && !defined(__clang__)
0763 #pragma GCC diagnostic pop
0764 #endif
0765 namespace detail {
0766 
0767 // using wrapper classes for hash and key_equal prevents the diamond problem when the same type is
0768 // used. see https://stackoverflow.com/a/28771920/48181
// Derives from the user-supplied hash functor (empty-base friendly) so that a
// distinct wrapper type exists for the hasher; see the note above about the
// diamond problem.
template <typename T>
struct WrapHash : public T {
  WrapHash() = default;
  explicit WrapHash(T const &o) noexcept(noexcept(T(std::declval<T const &>())))
      : T(o)
  {
  }
};
0774 
// Derives from the user-supplied key-equality functor; the distinct wrapper
// type keeps it apart from WrapHash even when Hash and KeyEqual are the same
// underlying type.
template <typename T>
struct WrapKeyEqual : public T {
  WrapKeyEqual() = default;
  explicit WrapKeyEqual(T const &o) noexcept(noexcept(T(std::declval<T const &>())))
      : T(o)
  {
  }
};
0780 
0781 // A highly optimized hashmap implementation, using the Robin Hood algorithm.
0782 //
0783 // In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but be
0784 // about 2x faster in most cases and require much less allocations.
0785 //
0786 // This implementation uses the following memory layout:
0787 //
0788 // [Node, Node, ... Node | info, info, ... infoSentinel ]
0789 //
0790 // * Node: either a DataNode that directly has the std::pair<key, val> as member,
0791 //   or a DataNode with a pointer to std::pair<key,val>. Which DataNode representation to use
//   depends on how fast the swap() operation is. Heuristically, this is automatically chosen based
//   on sizeof(). There are always 2^n Nodes.
0794 //
0795 // * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes.
0796 //   Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the
0797 //   corresponding node contains data. Set to 2 means the corresponding Node is filled, but it
0798 //   actually belongs to the previous position and was pushed out because that place is already
0799 //   taken.
0800 //
// * infoSentinel: Sentinel byte set to 1, so that the iterator's ++ can stop at end() without
//   the need for an idx variable.
0804 //
0805 // According to STL, order of templates has effect on throughput. That's why I've moved the boolean
0806 // to the front.
0807 // https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/
0808 template <bool IsFlatMap, size_t MaxLoadFactor100, typename Key, typename T, typename Hash, typename KeyEqual>
0809 class unordered_map
0810     : public WrapHash<Hash>,
0811       public WrapKeyEqual<KeyEqual>,
0812       detail::NodeAllocator<robin_hood::pair<typename std::conditional<IsFlatMap, Key, Key const>::type, T>, 4, 16384,
0813                             IsFlatMap> {
0814 public:
0815   using key_type    = Key;
0816   using mapped_type = T;
0817   using value_type  = robin_hood::pair<typename std::conditional<IsFlatMap, Key, Key const>::type, T>;
0818   using size_type   = size_t;
0819   using hasher      = Hash;
0820   using key_equal   = KeyEqual;
0821   using Self        = unordered_map<IsFlatMap, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
0822   static constexpr bool is_flat_map = IsFlatMap;
0823 
0824 private:
0825   static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100, "MaxLoadFactor100 needs to be >10 && < 100");
0826 
0827   using WHash     = WrapHash<Hash>;
0828   using WKeyEqual = WrapKeyEqual<KeyEqual>;
0829 
0830   // configuration defaults
0831 
0832   // make sure we have 8 elements, needed to quickly rehash mInfo
0833   static constexpr size_t InitialNumElements    = sizeof(uint64_t);
0834   static constexpr uint32_t InitialInfoNumBits  = 5;
0835   static constexpr uint8_t InitialInfoInc       = 1U << InitialInfoNumBits;
0836   static constexpr uint8_t InitialInfoHashShift = sizeof(size_t) * 8 - InitialInfoNumBits;
0837   using DataPool                                = detail::NodeAllocator<value_type, 4, 16384, IsFlatMap>;
0838 
0839   // type needs to be wider than uint8_t.
0840   using InfoType = uint32_t;
0841 
  // DataNode ////////////////////////////////////////////////////////

  // Primary template for the data node. We have special implementations for small and big
  // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these on
  // the heap so swap merely swaps a pointer. The bool parameter selects the representation
  // (true = inline storage, false = heap pointer); only the two specializations below are used.
  template <typename M, bool>
  class DataNode {
  };

  // Small: just allocate on the stack.
  template <typename M>
  class DataNode<M, true> final {
  public:
    // Constructs the contained pair in place; the map reference is unused because no
    // allocator is needed for inline storage.
    template <typename... Args>
    explicit DataNode(M &ROBIN_HOOD_UNUSED(map) /*unused*/,
                      Args &&... args) noexcept(noexcept(value_type(std::forward<Args>(args)...)))
        : mData(std::forward<Args>(args)...)
    {
    }

    DataNode(M &ROBIN_HOOD_UNUSED(map) /*unused*/,
             DataNode<M, true> &&n) noexcept(std::is_nothrow_move_constructible<value_type>::value)
        : mData(std::move(n.mData))
    {
    }

    // doesn't do anything: inline storage is released with the node itself.
    void destroy(M &ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {}
    void destroyDoNotDeallocate() noexcept {}

    // Pointer/reference access to the contained key-value pair.
    value_type const *operator->() const noexcept { return &mData; }
    value_type *operator->() noexcept { return &mData; }

    const value_type &operator*() const noexcept { return mData; }

    value_type &operator*() noexcept { return mData; }

    ROBIN_HOOD(NODISCARD) typename value_type::first_type &getFirst() noexcept { return mData.first; }

    ROBIN_HOOD(NODISCARD) typename value_type::first_type const &getFirst() const noexcept { return mData.first; }

    ROBIN_HOOD(NODISCARD) typename value_type::second_type &getSecond() noexcept { return mData.second; }

    ROBIN_HOOD(NODISCARD) typename value_type::second_type const &getSecond() const noexcept { return mData.second; }

    // Member-wise swap of the stored pair (delegates to robin_hood::pair::swap).
    void swap(DataNode<M, true> &o) noexcept(noexcept(std::declval<value_type>().swap(std::declval<value_type>())))
    {
      mData.swap(o.mData);
    }

  private:
    value_type mData;
  };
0895 
  // big object: allocate on heap. The node only holds a pointer, so swapping two nodes is a
  // cheap pointer swap regardless of how expensive value_type is to move.
  template <typename M>
  class DataNode<M, false> {
  public:
    // Acquires storage from the map's node pool and constructs the pair in it.
    template <typename... Args>
    explicit DataNode(M &map, Args &&... args) : mData(map.allocate())
    {
      ::new (static_cast<void *>(mData)) value_type(std::forward<Args>(args)...);
    }

    // Move just steals the pointer; the moved-from node must not be destroy()ed afterwards.
    DataNode(M &ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, false> &&n) noexcept : mData(std::move(n.mData)) {}

    void destroy(M &map) noexcept
    {
      // don't deallocate, just put it into list of datapool.
      mData->~value_type();
      map.deallocate(mData);
    }

    // Destroys the pair but intentionally leaks the pool slot (used when the whole pool is
    // about to be released anyway).
    void destroyDoNotDeallocate() noexcept { mData->~value_type(); }

    value_type const *operator->() const noexcept { return mData; }

    value_type *operator->() noexcept { return mData; }

    const value_type &operator*() const { return *mData; }

    value_type &operator*() { return *mData; }

    ROBIN_HOOD(NODISCARD) typename value_type::first_type &getFirst() { return mData->first; }

    ROBIN_HOOD(NODISCARD) typename value_type::first_type const &getFirst() const { return mData->first; }

    ROBIN_HOOD(NODISCARD) typename value_type::second_type &getSecond() { return mData->second; }

    ROBIN_HOOD(NODISCARD) typename value_type::second_type const &getSecond() const { return mData->second; }

    // O(1) pointer swap — the reason this representation exists.
    void swap(DataNode<M, false> &o) noexcept
    {
      using std::swap;
      swap(mData, o.mData);
    }

  private:
    value_type *mData;
  };
0942 
0943   using Node = DataNode<Self, IsFlatMap>;
0944 
  // Cloner //////////////////////////////////////////////////////////

  // Copies source's node/info arrays into target. Both maps must already have identically
  // sized allocations; UseMemcpy selects the byte-copy fast path.
  template <typename M, bool UseMemcpy>
  struct Cloner;

  // fast path: Just copy data, without allocating anything. A single byte copy covers both
  // the Node array and the trailing info bytes, since they live in one contiguous allocation
  // (see the memory-layout comment on the class).
  template <typename M>
  struct Cloner<M, true> {
    void operator()(M const &source, M &target) const
    {
      // std::memcpy(target.mKeyVals, source.mKeyVals,
      //             target.calcNumBytesTotal(target.mMask + 1));
      auto src = reinterpret_cast<char const *>(source.mKeyVals);
      auto tgt = reinterpret_cast<char *>(target.mKeyVals);
      std::copy(src, src + target.calcNumBytesTotal(target.mMask + 1), tgt);
    }
  };

  // slow path: copy the info bytes, then copy-construct a Node for every occupied slot
  // (info byte != 0).
  template <typename M>
  struct Cloner<M, false> {
    void operator()(M const &s, M &t) const
    {
      std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(t.mMask + 1), t.mInfo);

      for (size_t i = 0; i < t.mMask + 1; ++i) {
        if (t.mInfo[i]) {
          ::new (static_cast<void *>(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]);
        }
      }
    }
  };
0976 
  // Destroyer ///////////////////////////////////////////////////////

  // Destroys all nodes of a map. IsFlatMapAndTrivial selects a no-op fast path for flat maps
  // whose Node is trivially destructible.
  template <typename M, bool IsFlatMapAndTrivial>
  struct Destroyer {
  };

  // Trivially destructible nodes: nothing to destroy, just reset the element count.
  template <typename M>
  struct Destroyer<M, true> {
    void nodes(M &m) const noexcept { m.mNumElements = 0; }

    void nodesDoNotDeallocate(M &m) const noexcept { m.mNumElements = 0; }
  };

  // Non-trivial nodes: walk every slot and destroy the occupied ones (info byte != 0).
  template <typename M>
  struct Destroyer<M, false> {
    // Destroys each node and returns its heap storage (if any) to the map's pool.
    void nodes(M &m) const noexcept
    {
      m.mNumElements = 0;
      // clear also resets mInfo to 0, that's sometimes not necessary.
      for (size_t idx = 0; idx <= m.mMask; ++idx) {
        if (0 != m.mInfo[idx]) {
          Node &n = m.mKeyVals[idx];
          n.destroy(m);
          n.~Node();
        }
      }
    }

    // Same as nodes(), but skips returning storage to the pool — used when the pool itself
    // is about to be torn down.
    void nodesDoNotDeallocate(M &m) const noexcept
    {
      m.mNumElements = 0;
      // clear also resets mInfo to 0, that's sometimes not necessary.
      for (size_t idx = 0; idx <= m.mMask; ++idx) {
        if (0 != m.mInfo[idx]) {
          Node &n = m.mKeyVals[idx];
          n.destroyDoNotDeallocate();
          n.~Node();
        }
      }
    }
  };
1018 
  // Iter ////////////////////////////////////////////////////////////

  // Tag type: constructor overload selector that requests an immediate fastForward() to the
  // first occupied slot.
  struct fast_forward_tag {
  };

  // generic iterator for both const_iterator and iterator.
  template <bool IsConst>
  // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions)
  class Iter {
  private:
    using NodePtr = typename std::conditional<IsConst, Node const *, Node *>::type;

  public:
    using difference_type   = std::ptrdiff_t;
    using value_type        = typename Self::value_type;
    using reference         = typename std::conditional<IsConst, value_type const &, value_type &>::type;
    using pointer           = typename std::conditional<IsConst, value_type const *, value_type *>::type;
    using iterator_category = std::forward_iterator_tag;

    // default constructed iterator can be compared to itself, but WON'T return true when
    // compared to end().
    Iter() = default;

    // Rule of zero: nothing specified. The conversion constructor is only enabled for iterator
    // to const_iterator, so it doesn't accidentally work as a copy ctor.

    // Conversion constructor from iterator to const_iterator.
    template <bool OtherIsConst, typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
    // NOLINTNEXTLINE(hicpp-explicit-conversions)
    Iter(Iter<OtherIsConst> const &other) noexcept : mKeyVals(other.mKeyVals), mInfo(other.mInfo)
    {
    }

    Iter(NodePtr valPtr, uint8_t const *infoPtr) noexcept : mKeyVals(valPtr), mInfo(infoPtr) {}

    // Positions the iterator on the first occupied slot at or after valPtr.
    Iter(NodePtr valPtr, uint8_t const *infoPtr, fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept
        : mKeyVals(valPtr), mInfo(infoPtr)
    {
      fastForward();
    }

    // Assignment from iterator to const_iterator only (same enable_if gate as above).
    template <bool OtherIsConst, typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
    Iter &operator=(Iter<OtherIsConst> const &other) noexcept
    {
      mKeyVals = other.mKeyVals;
      mInfo    = other.mInfo;
      return *this;
    }

    // prefix increment. Undefined behavior if we are at end()!
    Iter &operator++() noexcept
    {
      mInfo++;
      mKeyVals++;
      fastForward();
      return *this;
    }

    reference operator*() const { return **mKeyVals; }

    pointer operator->() const { return &**mKeyVals; }

    // Iterators are equal iff they point at the same node; const-ness is irrelevant.
    template <bool O>
    bool operator==(Iter<O> const &o) const noexcept
    {
      return mKeyVals == o.mKeyVals;
    }

    template <bool O>
    bool operator!=(Iter<O> const &o) const noexcept
    {
      return mKeyVals != o.mKeyVals;
    }

  private:
    // fast forward to the next non-free info byte. Scans sizeof(size_t) info bytes per
    // iteration; counting zero bytes from the low (little-endian) or high (big-endian) end
    // tells how many consecutive empty slots to skip. Terminates at the non-zero sentinel
    // info byte placed after the last slot (see the infoSentinel comment on the class).
    void fastForward() noexcept
    {
      int inc;
      do {
        auto const n = detail::unaligned_load<size_t>(mInfo);
#if ROBIN_HOOD(LITTLE_ENDIAN)
        inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
#else
        inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8;
#endif
        mInfo += inc;
        mKeyVals += inc;
      } while (inc == static_cast<int>(sizeof(size_t)));
    }

    friend class unordered_map<IsFlatMap, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
    NodePtr mKeyVals{nullptr};
    uint8_t const *mInfo{nullptr};
  };
1114 
1115   ////////////////////////////////////////////////////////////////////
1116 
1117   // highly performance relevant code.
1118   // Lower bits are used for indexing into the array (2^n size)
1119   // The upper 1-5 bits need to be a reasonable good hash, to save comparisons.
1120   template <typename HashKey>
1121   void keyToIdx(HashKey &&key, size_t *idx, InfoType *info) const
1122   {
1123     // for a user-specified hash that is *not* robin_hood::hash, apply robin_hood::hash as an
1124     // additional mixing step. This serves as a bad hash prevention, if the given data is badly
1125     // mixed.
1126     using Mix =
1127         typename std::conditional<std::is_same<::robin_hood::hash<key_type>, hasher>::value,
1128                                   ::robin_hood::detail::identity_hash<size_t>, ::robin_hood::hash<size_t>>::type;
1129     *idx = Mix{}(WHash::operator()(key));
1130 
1131     *info = mInfoInc + static_cast<InfoType>(*idx >> mInfoHashShift);
1132     *idx &= mMask;
1133   }
1134 
  // forwards the index by one, wrapping around at the end; the info value grows by mInfoInc
  // per step so it stays comparable to the stored info bytes (distance-from-home encoding).
  void next(InfoType *info, size_t *idx) const noexcept
  {
    *idx = (*idx + 1) & mMask;
    *info += mInfoInc;
  }

  // advances past all slots whose stored info is larger, i.e. whose occupants are "richer"
  // (closer to home) under the Robin Hood invariant.
  void nextWhileLess(InfoType *info, size_t *idx) const noexcept
  {
    // unrolling this by hand did not bring any speedups.
    while (*info < mInfo[*idx]) {
      next(info, idx);
    }
  }
1149 
  // Shifts the elements in [insertion_idx, idx) up by one slot, freeing insertion_idx for a
  // new element. Walks backwards from idx: a slot whose info byte is non-zero already holds a
  // constructed Node and is move-assigned into; a zero info byte marks raw memory, which must
  // be placement-new constructed instead. Each shifted element's info byte grows by mInfoInc
  // (one step further from home); if that would overflow 0xFF, force a rehash by zeroing
  // mMaxNumElementsAllowed.
  void shiftUp(size_t idx, size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable<Node>::value)
  {
    while (idx != insertion_idx) {
      size_t prev_idx = (idx - 1) & mMask;
      if (mInfo[idx]) {
        mKeyVals[idx] = std::move(mKeyVals[prev_idx]);
      } else {
        ::new (static_cast<void *>(mKeyVals + idx)) Node(std::move(mKeyVals[prev_idx]));
      }
      mInfo[idx] = static_cast<uint8_t>(mInfo[prev_idx] + mInfoInc);
      if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) {
        mMaxNumElementsAllowed = 0;
      }
      idx = prev_idx;
    }
  }
1169 
  // Removes the element at idx using backward-shift deletion: destroys it, then pulls each
  // following displaced element (info >= 2*mInfoInc, i.e. not at its home slot) one slot
  // closer to home, decreasing its info byte accordingly.
  void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable<Node>::value)
  {
    // until we find one that is either empty or has zero offset.
    // TODO(martinus) we don't need to move everything, just the last one for the same bucket.
    mKeyVals[idx].destroy(*this);

    // until we find one that is either empty or has zero offset.
    size_t nextIdx = (idx + 1) & mMask;
    while (mInfo[nextIdx] >= 2 * mInfoInc) {
      mInfo[idx]    = static_cast<uint8_t>(mInfo[nextIdx] - mInfoInc);
      mKeyVals[idx] = std::move(mKeyVals[nextIdx]);
      idx           = nextIdx;
      nextIdx       = (idx + 1) & mMask;
    }

    // the last vacated slot becomes empty; its Node was moved from, so only run the
    // destructor, don't destroy() the payload.
    mInfo[idx] = 0;
    // don't destroy, we've moved it
    // mKeyVals[idx].destroy(*this);
    mKeyVals[idx].~Node();
  }
1190 
  // Core lookup: returns the slot index of key, or mMask + 1 (the sentinel slot, which
  // find()/count()/at() treat as end()) when the key is absent. For a never-allocated map
  // (mMask == 0) it returns 0. Both find() overloads and at()/count() are built on this.
  template <typename Other>
  ROBIN_HOOD(NODISCARD)
  size_t findIdx(Other const &key) const
  {
    size_t idx;
    InfoType info;
    keyToIdx(key, &idx, &info);

    do {
      // unrolling this twice gives a bit of a speedup. More unrolling did not help.
      if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
        return idx;
      }
      next(&info, &idx);
      if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
        return idx;
      }
      next(&info, &idx);
    } while (info <= mInfo[idx]);

    // nothing found!
    return mMask == 0 ? 0 : mMask + 1;
  }
1215 
  // Copies o's node/info arrays into this map's (already identically sized) allocation,
  // byte-copying when the Node is trivially copyable, element-wise otherwise.
  void cloneData(const unordered_map &o)
  {
    Cloner<unordered_map, IsFlatMap && ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(Node)>()(o, *this);
  }
1220 
  // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized.
  // No duplicate check is performed — the caller guarantees the key is absent.
  // @return index where the element was created
  size_t insert_move(Node &&keyval)
  {
    // we don't retry, fail if overflowing
    // don't need to check max num elements
    if (0 == mMaxNumElementsAllowed && !try_increase_info()) {
      throwOverflowError();
    }

    size_t idx;
    InfoType info;
    keyToIdx(keyval.getFirst(), &idx, &info);

    // skip forward. Use <= because we are certain that the element is not there.
    while (info <= mInfo[idx]) {
      idx = (idx + 1) & mMask;
      info += mInfoInc;
    }

    // key not found, so we are now exactly where we want to insert it.
    auto const insertion_idx  = idx;
    auto const insertion_info = static_cast<uint8_t>(info);
    if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
      // info byte would overflow on the next displacement: force a rehash soon.
      mMaxNumElementsAllowed = 0;
    }

    // find an empty spot past any displaced elements
    while (0 != mInfo[idx]) {
      next(&info, &idx);
    }

    auto &l = mKeyVals[insertion_idx];
    if (idx == insertion_idx) {
      // insertion slot is raw memory: construct in place.
      ::new (static_cast<void *>(&l)) Node(std::move(keyval));
    } else {
      // make room by shifting [insertion_idx, idx) up one slot, then move-assign.
      shiftUp(idx, insertion_idx);
      l = std::move(keyval);
    }

    // put at empty spot
    mInfo[insertion_idx] = insertion_info;

    ++mNumElements;
    return insertion_idx;
  }
1267 
1268 public:
1269   using iterator       = Iter<false>;
1270   using const_iterator = Iter<true>;
1271 
  // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert. This
  // tremendously speeds up ctor & dtor of a map that never receives an element. The penalty is
  // paid at the first insert, and not before. Lookup of this empty map works because everybody
  // points to DummyInfoByte::b. The parameter bucket_count is dictated by the standard, but we
  // can ignore it.
  explicit unordered_map(size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash &h = Hash{},
                         const KeyEqual &equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal)))
      : WHash(h), WKeyEqual(equal)
  {
    ROBIN_HOOD_TRACE(this);
  }

  // Range constructor: inserts [first, last). bucket_count is ignored (see above).
  template <typename Iter>
  unordered_map(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash &h = Hash{},
                const KeyEqual &equal = KeyEqual{})
      : WHash(h), WKeyEqual(equal)
  {
    ROBIN_HOOD_TRACE(this);
    insert(first, last);
  }

  // Initializer-list constructor: inserts every element of initlist.
  unordered_map(std::initializer_list<value_type> initlist, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0,
                const Hash &h = Hash{}, const KeyEqual &equal = KeyEqual{})
      : WHash(h), WKeyEqual(equal)
  {
    ROBIN_HOOD_TRACE(this);
    insert(initlist.begin(), initlist.end());
  }
1300 
  // Move constructor: steals o's storage when o actually holds data (mMask != 0); otherwise
  // both maps stay in the cheap unallocated state.
  unordered_map(unordered_map &&o) noexcept
      : WHash(std::move(static_cast<WHash &>(o))), WKeyEqual(std::move(static_cast<WKeyEqual &>(o))),
        DataPool(std::move(static_cast<DataPool &>(o)))
  {
    ROBIN_HOOD_TRACE(this);
    if (o.mMask) {
      mKeyVals               = std::move(o.mKeyVals);
      mInfo                  = std::move(o.mInfo);
      mNumElements           = std::move(o.mNumElements);
      mMask                  = std::move(o.mMask);
      mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
      mInfoInc               = std::move(o.mInfoInc);
      mInfoHashShift         = std::move(o.mInfoHashShift);
      // set other's mask to 0 so its destructor won't do anything
      o.init();
    }
  }

  // Move assignment: destroys our data, steals o's storage, and resets o; if o is empty we
  // simply clear() ourselves and keep our allocation.
  unordered_map &operator=(unordered_map &&o) noexcept
  {
    ROBIN_HOOD_TRACE(this);
    if (&o != this) {
      if (o.mMask) {
        // only move stuff if the other map actually has some data
        destroy();
        mKeyVals               = std::move(o.mKeyVals);
        mInfo                  = std::move(o.mInfo);
        mNumElements           = std::move(o.mNumElements);
        mMask                  = std::move(o.mMask);
        mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed);
        mInfoInc               = std::move(o.mInfoInc);
        mInfoHashShift         = std::move(o.mInfoHashShift);
        WHash::operator        =(std::move(static_cast<WHash &>(o)));
        WKeyEqual::operator    =(std::move(static_cast<WKeyEqual &>(o)));
        DataPool::operator     =(std::move(static_cast<DataPool &>(o)));

        // set other's mask to 0 so its destructor won't do anything
        o.init();

      } else {
        // nothing in the other map => just clear us.
        clear();
      }
    }
    return *this;
  }
1346 
  // Copy constructor: allocates a same-sized table and clones o's contents wholesale.
  unordered_map(const unordered_map &o)
      : WHash(static_cast<const WHash &>(o)), WKeyEqual(static_cast<const WKeyEqual &>(o)),
        DataPool(static_cast<const DataPool &>(o))
  {
    ROBIN_HOOD_TRACE(this);
    if (!o.empty()) {
      // not empty: create an exact copy. it is also possible to just iterate through all
      // elements and insert them, but copying is probably faster.

      mKeyVals = static_cast<Node *>(detail::assertNotNull<std::bad_alloc>(malloc(calcNumBytesTotal(o.mMask + 1))));
      // no need for calloc because cloneData does memcpy
      mInfo                  = reinterpret_cast<uint8_t *>(mKeyVals + o.mMask + 1);
      mNumElements           = o.mNumElements;
      mMask                  = o.mMask;
      mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
      mInfoInc               = o.mInfoInc;
      mInfoHashShift         = o.mInfoHashShift;
      cloneData(o);
    }
  }

  // Creates a copy of the given map. Copy constructor of each entry is used.
  // Reuses the existing allocation when both tables have the same size.
  unordered_map &operator=(unordered_map const &o)
  {
    ROBIN_HOOD_TRACE(this);
    if (&o == this) {
      // prevent assigning of itself
      return *this;
    }

    // we keep using the old allocator and not assign the new one, because we want to keep the
    // memory available. when it is the same size.
    if (o.empty()) {
      if (0 == mMask) {
        // nothing to do, we are empty too
        return *this;
      }

      // not empty: destroy what we have there
      // clear also resets mInfo to 0, that's sometimes not necessary.
      destroy();
      init();
      WHash::operator    =(static_cast<const WHash &>(o));
      WKeyEqual::operator=(static_cast<const WKeyEqual &>(o));
      DataPool::operator =(static_cast<DataPool const &>(o));

      return *this;
    }

    // clean up old stuff
    Destroyer<Self, IsFlatMap && std::is_trivially_destructible<Node>::value>{}.nodes(*this);

    if (mMask != o.mMask) {
      // no luck: we don't have the same array size allocated, so we need to realloc.
      if (0 != mMask) {
        // only deallocate if we actually have data!
        free(mKeyVals);
      }

      mKeyVals = static_cast<Node *>(detail::assertNotNull<std::bad_alloc>(malloc(calcNumBytesTotal(o.mMask + 1))));

      // no need for calloc here because cloneData performs a memcpy.
      mInfo = reinterpret_cast<uint8_t *>(mKeyVals + o.mMask + 1);
      // sentinel is set in cloneData
    }
    WHash::operator        =(static_cast<const WHash &>(o));
    WKeyEqual::operator    =(static_cast<const WKeyEqual &>(o));
    DataPool::operator     =(static_cast<DataPool const &>(o));
    mNumElements           = o.mNumElements;
    mMask                  = o.mMask;
    mMaxNumElementsAllowed = o.mMaxNumElementsAllowed;
    mInfoInc               = o.mInfoInc;
    mInfoHashShift         = o.mInfoHashShift;
    cloneData(o);

    return *this;
  }
1424 
  // Swaps everything between the two maps. Delegates to std::swap, which uses the move
  // constructor and move assignments above (one move-construct plus two move-assigns).
  void swap(unordered_map &o)
  {
    ROBIN_HOOD_TRACE(this);
    using std::swap;
    swap(o, *this);
  }
1432 
  // Clears all data, without resizing. The allocation and mMask are kept so subsequent
  // inserts can reuse the table.
  void clear()
  {
    ROBIN_HOOD_TRACE(this);
    if (empty()) {
      // don't do anything! also important because we don't want to write to DummyInfoByte::b,
      // even though we would just write 0 to it.
      return;
    }

    Destroyer<Self, IsFlatMap && std::is_trivially_destructible<Node>::value>{}.nodes(*this);

    // clear everything except the sentinel
    // std::memset(mInfo, 0, sizeof(uint8_t) * (mMask + 1));
    uint8_t const z = 0;
    std::fill(mInfo, mInfo + (sizeof(uint8_t) * (mMask + 1)), z);

    // reset the info encoding to its initial (un-degraded) parameters.
    mInfoInc       = InitialInfoInc;
    mInfoHashShift = InitialInfoHashShift;
  }
1453 
  // Destroys the map and all its contents.
  ~unordered_map()
  {
    ROBIN_HOOD_TRACE(this);
    destroy();
  }
1460 
1461   // Checks if both maps contain the same entries. Order is irrelevant.
1462   bool operator==(const unordered_map &other) const
1463   {
1464     ROBIN_HOOD_TRACE(this);
1465     if (other.size() != size()) {
1466       return false;
1467     }
1468     for (auto const &otherEntry : other) {
1469       auto const myIt = find(otherEntry.first);
1470       if (myIt == end() || !(myIt->second == otherEntry.second)) {
1471         return false;
1472       }
1473     }
1474 
1475     return true;
1476   }
1477 
1478   bool operator!=(const unordered_map &other) const
1479   {
1480     ROBIN_HOOD_TRACE(this);
1481     return !operator==(other);
1482   }
1483 
  // Returns a reference to the mapped value for key, default-constructing it if absent.
  mapped_type &operator[](const key_type &key)
  {
    ROBIN_HOOD_TRACE(this);
    return doCreateByKey(key);
  }

  // Same as above, but moves the key into the map when a new entry is created.
  mapped_type &operator[](key_type &&key)
  {
    ROBIN_HOOD_TRACE(this);
    return doCreateByKey(std::move(key));
  }

  // Inserts every element of [first, last); duplicates of existing keys are ignored.
  template <typename Iter>
  void insert(Iter first, Iter last)
  {
    for (; first != last; ++first) {
      // value_type ctor needed because this might be called with std::pair's
      insert(value_type(*first));
    }
  }
1504 
  // Constructs a node from args and tries to insert it.
  // @return pair of iterator to the (new or pre-existing) element and a bool that is true
  //         iff the insertion took place.
  template <typename... Args>
  std::pair<iterator, bool> emplace(Args &&... args)
  {
    ROBIN_HOOD_TRACE(this);
    Node n{*this, std::forward<Args>(args)...};
    auto r = doInsert(std::move(n));
    if (!r.second) {
      // insertion not possible: destroy node
      // NOLINTNEXTLINE(bugprone-use-after-move)
      n.destroy(*this);
    }
    return r;
  }

  // Inserts a copy of keyval; no-op (second == false) if the key already exists.
  std::pair<iterator, bool> insert(const value_type &keyval)
  {
    ROBIN_HOOD_TRACE(this);
    return doInsert(keyval);
  }

  // Move-inserting overload of the above.
  std::pair<iterator, bool> insert(value_type &&keyval) { return doInsert(std::move(keyval)); }
1526 
  // Returns 1 if key is found, 0 otherwise.
  size_t count(const key_type &key) const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    auto kv = mKeyVals + findIdx(key);
    // findIdx returns the sentinel slot index on failure, which is exactly where mInfo
    // begins in the contiguous allocation.
    if (kv != reinterpret_cast_no_cast_align_warning<Node *>(mInfo)) {
      return 1;
    }
    return 0;
  }

  // Returns a reference to the value found for key.
  // Throws std::out_of_range if element cannot be found
  mapped_type &at(key_type const &key)
  {
    ROBIN_HOOD_TRACE(this);
    auto kv = mKeyVals + findIdx(key);
    if (kv == reinterpret_cast_no_cast_align_warning<Node *>(mInfo)) {
      doThrow<std::out_of_range>("key not found");
    }
    return kv->getSecond();
  }

  // Returns a reference to the value found for key.
  // Throws std::out_of_range if element cannot be found
  mapped_type const &at(key_type const &key) const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    auto kv = mKeyVals + findIdx(key);
    if (kv == reinterpret_cast_no_cast_align_warning<Node *>(mInfo)) {
      doThrow<std::out_of_range>("key not found");
    }
    return kv->getSecond();
  }
1561 
  // Returns a const_iterator to key, or end() if not found (findIdx yields the sentinel
  // index in that case, which equals end()'s node pointer).
  const_iterator find(const key_type &key) const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    const size_t idx = findIdx(key);
    return const_iterator{mKeyVals + idx, mInfo + idx};
  }

  // Heterogeneous lookup: OtherKey need not be key_type, only hashable/comparable with it.
  template <typename OtherKey>
  const_iterator find(const OtherKey &key, is_transparent_tag /*unused*/) const
  {
    ROBIN_HOOD_TRACE(this);
    const size_t idx = findIdx(key);
    return const_iterator{mKeyVals + idx, mInfo + idx};
  }

  // Mutable-iterator counterpart of find() above.
  iterator find(const key_type &key)
  {
    ROBIN_HOOD_TRACE(this);
    const size_t idx = findIdx(key);
    return iterator{mKeyVals + idx, mInfo + idx};
  }

  // Heterogeneous, mutable-iterator lookup.
  template <typename OtherKey>
  iterator find(const OtherKey &key, is_transparent_tag /*unused*/)
  {
    ROBIN_HOOD_TRACE(this);
    const size_t idx = findIdx(key);
    return iterator{mKeyVals + idx, mInfo + idx};
  }
1591 
  // Iterator to the first occupied slot (fast_forward_tag skips empty slots), or end() for
  // an empty map.
  iterator begin()
  {
    ROBIN_HOOD_TRACE(this);
    if (empty()) {
      return end();
    }
    return iterator(mKeyVals, mInfo, fast_forward_tag{});
  }
  const_iterator begin() const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    return cbegin();
  }
  const_iterator cbegin() const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    if (empty()) {
      return cend();
    }
    return const_iterator(mKeyVals, mInfo, fast_forward_tag{});
  }

  // end() points at the sentinel slot (where mInfo begins in the contiguous allocation).
  iterator end()
  {
    ROBIN_HOOD_TRACE(this);
    // no need to supply valid info pointer: end() must not be dereferenced, and only node
    // pointer is compared.
    return iterator{reinterpret_cast_no_cast_align_warning<Node *>(mInfo), nullptr};
  }
  const_iterator end() const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    return cend();
  }
  const_iterator cend() const
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    return const_iterator{reinterpret_cast_no_cast_align_warning<Node *>(mInfo), nullptr};
  }
1631 
1632   iterator erase(const_iterator pos)
1633   {
1634     ROBIN_HOOD_TRACE(this);
1635     // its safe to perform const cast here
1636     // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
1637     return erase(iterator{const_cast<Node *>(pos.mKeyVals), const_cast<uint8_t *>(pos.mInfo)});
1638   }
1639 
  // Erases element at pos, returns iterator to the next element.
  iterator erase(iterator pos)
  {
    ROBIN_HOOD_TRACE(this);
    // we assume that pos always points to a valid entry, and not end().
    auto const idx = static_cast<size_t>(pos.mKeyVals - mKeyVals);

    // backward-shift deletion: close the hole by moving subsequent displaced
    // entries one slot down instead of using tombstones.
    shiftDown(idx);
    --mNumElements;

    if (*pos.mInfo) {
      // we've backward shifted, return this again
      return pos;
    }

    // no backward shift, return next element
    return ++pos;
  }
1658 
  // Erases the entry with the given key, if present.
  // Returns the number of elements removed (0 or 1).
  size_t erase(const key_type &key)
  {
    ROBIN_HOOD_TRACE(this);
    size_t idx;
    InfoType info;
    keyToIdx(key, &idx, &info);

    // check while info matches with the source idx
    do {
      if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
        // found: backward-shift the probe chain to fill the hole
        shiftDown(idx);
        --mNumElements;
        return 1;
      }
      next(&info, &idx);
    } while (info <= mInfo[idx]);

    // nothing found to delete
    return 0;
  }
1679 
  // reserves space for the specified number of elements. Makes sure the old data fits.
  // exactly the same as reserve(c); provided for API parity with std::unordered_map.
  void rehash(size_t c) { reserve(c); }
1683 
1684   // reserves space for the specified number of elements. Makes sure the old data fits.
1685   // Exactly the same as resize(c). Use resize(0) to shrink to fit.
1686   void reserve(size_t c)
1687   {
1688     ROBIN_HOOD_TRACE(this);
1689     auto const minElementsAllowed = (std::max)(c, mNumElements);
1690     auto newSize                  = InitialNumElements;
1691     while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) {
1692       newSize *= 2;
1693     }
1694     if (ROBIN_HOOD_UNLIKELY(newSize == 0)) {
1695       throwOverflowError();
1696     }
1697 
1698     rehashPowerOfTwo(newSize);
1699   }
1700 
  // Number of elements currently stored in the map.
  size_type size() const noexcept
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    return mNumElements;
  }
1706 
1707   size_type max_size() const noexcept
1708   { // NOLINT(modernize-use-nodiscard)
1709     ROBIN_HOOD_TRACE(this);
1710     return static_cast<size_type>(-1);
1711   }
1712 
1713   ROBIN_HOOD(NODISCARD) bool empty() const noexcept
1714   {
1715     ROBIN_HOOD_TRACE(this);
1716     return 0 == mNumElements;
1717   }
1718 
  // Maximum load factor as a fraction, fixed at compile time by the
  // MaxLoadFactor100 template parameter (expressed in percent).
  float max_load_factor() const noexcept
  { // NOLINT(modernize-use-nodiscard)
    ROBIN_HOOD_TRACE(this);
    return MaxLoadFactor100 / 100.0F;
  }
1724 
1725   // Average number of elements per bucket. Since we allow only 1 per bucket
1726   float load_factor() const noexcept
1727   { // NOLINT(modernize-use-nodiscard)
1728     ROBIN_HOOD_TRACE(this);
1729     return static_cast<float>(size()) / static_cast<float>(mMask + 1);
1730   }
1731 
  // Bucket-index mask (bucket count minus one); 0 while nothing is allocated.
  ROBIN_HOOD(NODISCARD) size_t mask() const noexcept
  {
    ROBIN_HOOD_TRACE(this);
    return mMask;
  }
1737 
1738   ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept
1739   {
1740     if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits<size_t>::max)() / 100)) {
1741       return maxElements * MaxLoadFactor100 / 100;
1742     }
1743 
1744     // we might be a bit inprecise, but since maxElements is quite large that doesn't matter
1745     return (maxElements / 100) * MaxLoadFactor100;
1746   }
1747 
1748   ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const { return numElements + sizeof(uint64_t); }
1749 
  // Total allocation size for `numElements` buckets: the node array followed
  // by the info array (the two share one allocation; see init_data).
  // calculation ony allowed for 2^n values
  ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const
  {
#if ROBIN_HOOD(BITNESS) == 64
    // 64 bit: plain size_t arithmetic; overflowing it would require an
    // impossibly large element count.
    return numElements * sizeof(Node) + calcNumBytesInfo(numElements);
#else
    // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows.
    auto const ne    = static_cast<uint64_t>(numElements);
    auto const s     = static_cast<uint64_t>(sizeof(Node));
    auto const infos = static_cast<uint64_t>(calcNumBytesInfo(numElements));

    auto const total64 = ne * s + infos;
    auto const total   = static_cast<size_t>(total64);

    // detect truncation when narrowing the 64-bit result back to size_t
    if (ROBIN_HOOD_UNLIKELY(static_cast<uint64_t>(total) != total64)) {
      throwOverflowError();
    }
    return total;
#endif
  }
1770 
1771 private:
1772   // reserves space for at least the specified number of elements.
1773   // only works if numBuckets if power of two
1774   void rehashPowerOfTwo(size_t numBuckets)
1775   {
1776     ROBIN_HOOD_TRACE(this);
1777 
1778     Node *const oldKeyVals       = mKeyVals;
1779     uint8_t const *const oldInfo = mInfo;
1780 
1781     const size_t oldMaxElements = mMask + 1;
1782 
1783     // resize operation: move stuff
1784     init_data(numBuckets);
1785     if (oldMaxElements > 1) {
1786       for (size_t i = 0; i < oldMaxElements; ++i) {
1787         if (oldInfo[i] != 0) {
1788           insert_move(std::move(oldKeyVals[i]));
1789           // destroy the node but DON'T destroy the data.
1790           oldKeyVals[i].~Node();
1791         }
1792       }
1793 
1794       // don't destroy old data: put it into the pool instead
1795       DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElements));
1796     }
1797   }
1798 
  // Reports that the map cannot grow any further: throws std::overflow_error
  // when exceptions are available, otherwise aborts the process.
  void throwOverflowError() const
  {
#if ROBIN_HOOD(HAS_EXCEPTIONS)
    throw std::overflow_error("robin_hood::map overflow");
#else
    abort();
#endif
  }
1807 
  // Allocates zeroed storage for max_elements buckets (must be a power of
  // two): one contiguous block holding the Node array followed by the info
  // byte array plus its sentinel.
  void init_data(size_t max_elements)
  {
    mNumElements           = 0;
    mMask                  = max_elements - 1;
    mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements);

    // calloc also zeroes everything
    mKeyVals =
        reinterpret_cast<Node *>(detail::assertNotNull<std::bad_alloc>(calloc(1, calcNumBytesTotal(max_elements))));
    mInfo = reinterpret_cast<uint8_t *>(mKeyVals + max_elements);

    // set sentinel
    // the non-zero byte one past the last bucket stops probing/iteration
    mInfo[max_elements] = 1;

    mInfoInc       = InitialInfoInc;
    mInfoHashShift = InitialInfoHashShift;
  }
1825 
  // Backend for operator[]: inserts a default-constructed mapped_type under
  // `key` unless the key already exists; either way returns a reference to
  // the mapped value. Arg may be an lvalue or rvalue key (perfect-forwarded).
  template <typename Arg>
  mapped_type &doCreateByKey(Arg &&key)
  {
    // loop so the insert can be retried after increase_size() rehashes
    while (true) {
      size_t idx;
      InfoType info;
      keyToIdx(key, &idx, &info);
      nextWhileLess(&info, &idx);

      // while we potentially have a match. Can't do a do-while here because when mInfo is 0
      // we don't want to skip forward
      while (info == mInfo[idx]) {
        if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) {
          // key already exists, do not insert.
          return mKeyVals[idx].getSecond();
        }
        next(&info, &idx);
      }

      // unlikely that this evaluates to true
      if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
        increase_size();
        continue;
      }

      // key not found, so we are now exactly where we want to insert it.
      auto const insertion_idx  = idx;
      auto const insertion_info = info;
      if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
        // the info byte would overflow on a later probe step: zero the budget
        // so the next insert forces a resize.
        mMaxNumElementsAllowed = 0;
      }

      // find an empty spot
      while (0 != mInfo[idx]) {
        next(&info, &idx);
      }

      auto &l = mKeyVals[insertion_idx];
      if (idx == insertion_idx) {
        // put at empty spot. This forwards all arguments into the node where the object is
        // constructed exactly where it is needed.
        ::new (static_cast<void *>(&l)) Node(*this, std::piecewise_construct,
                                             std::forward_as_tuple(std::forward<Arg>(key)), std::forward_as_tuple());
      } else {
        // robin hood displacement: shift the occupied run up one slot, then
        // move-assign the new node into the freed position.
        shiftUp(idx, insertion_idx);
        l = Node(*this, std::piecewise_construct, std::forward_as_tuple(std::forward<Arg>(key)),
                 std::forward_as_tuple());
      }

      // mKeyVals[idx].getFirst() = std::move(key);
      mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);

      ++mNumElements;
      return mKeyVals[insertion_idx].getSecond();
    }
  }
1882 
  // Backend for insert/emplace: inserts `keyval` unless its key is already
  // present. Returns {iterator to the entry, true if inserted}.
  // This is exactly the same code as operator[], except for the return values
  template <typename Arg>
  std::pair<iterator, bool> doInsert(Arg &&keyval)
  {
    // loop so the insert can be retried after increase_size() rehashes
    while (true) {
      size_t idx;
      InfoType info;
      keyToIdx(keyval.getFirst(), &idx, &info);
      nextWhileLess(&info, &idx);

      // while we potentially have a match
      while (info == mInfo[idx]) {
        if (WKeyEqual::operator()(keyval.getFirst(), mKeyVals[idx].getFirst())) {
          // key already exists, do NOT insert.
          // see http://en.cppreference.com/w/cpp/container/unordered_map/insert
          return std::make_pair<iterator, bool>(iterator(mKeyVals + idx, mInfo + idx), false);
        }
        next(&info, &idx);
      }

      // unlikely that this evaluates to true
      if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) {
        increase_size();
        continue;
      }

      // key not found, so we are now exactly where we want to insert it.
      auto const insertion_idx  = idx;
      auto const insertion_info = info;
      if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) {
        // the info byte would overflow on a later probe step: zero the budget
        // so the next insert forces a resize.
        mMaxNumElementsAllowed = 0;
      }

      // find an empty spot
      while (0 != mInfo[idx]) {
        next(&info, &idx);
      }

      auto &l = mKeyVals[insertion_idx];
      if (idx == insertion_idx) {
        // construct directly in the free slot
        ::new (static_cast<void *>(&l)) Node(*this, std::forward<Arg>(keyval));
      } else {
        // robin hood displacement: make room, then move-assign into place
        shiftUp(idx, insertion_idx);
        l = Node(*this, std::forward<Arg>(keyval));
      }

      // put at empty spot
      mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info);

      ++mNumElements;
      return std::make_pair(iterator(mKeyVals + insertion_idx, mInfo + insertion_idx), true);
    }
  }
1936 
  // Tries to make room without rehashing by halving mInfoInc, which trades
  // one hash bit per info byte for a doubled representable probe distance.
  // Returns false once mInfoInc is at its minimum and this trick is used up.
  bool try_increase_info()
  {
    ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements
                               << ", maxNumElementsAllowed=" << calcMaxNumElementsAllowed(mMask + 1));
    if (mInfoInc <= 2) {
      // need to be > 2 so that shift works (otherwise undefined behavior!)
      return false;
    }
    // we got space left, try to make info smaller
    mInfoInc = static_cast<uint8_t>(mInfoInc >> 1U);

    // remove one bit of the hash, leaving more space for the distance info.
    // This is extremely fast because we can operate on 8 bytes at once.
    ++mInfoHashShift;
    auto const data       = reinterpret_cast_no_cast_align_warning<uint64_t *>(mInfo);
    auto const numEntries = (mMask + 1) / 8;

    for (size_t i = 0; i < numEntries; ++i) {
      // shift each of the 8 packed info bytes right by one bit in parallel
      data[i] = (data[i] >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f);
    }
    mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
    return true;
  }
1960 
  // Grows the table: lazily allocates on first use, then tries the cheap
  // info-compression trick, and finally doubles the bucket count.
  void increase_size()
  {
    // nothing allocated yet? just allocate InitialNumElements
    if (0 == mMask) {
      init_data(InitialNumElements);
      return;
    }

    auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1);
    if (mNumElements < maxNumElementsAllowed && try_increase_info()) {
      return;
    }

    ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed=" << maxNumElementsAllowed << ", load="
                                   << (static_cast<double>(mNumElements) * 100.0 / (static_cast<double>(mMask) + 1)));
    // it seems we have a really bad hash function! don't try to resize again
    if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) {
      throwOverflowError();
    }

    rehashPowerOfTwo((mMask + 1) * 2);
  }
1983 
  // Destroys all nodes and releases the bucket array. The unallocated state
  // (mMask == 0, where mKeyVals aliases &mMask; see init()) owns no heap
  // memory and must not be freed.
  void destroy()
  {
    if (0 == mMask) {
      // don't deallocate!
      return;
    }

    // flat maps with trivially destructible nodes skip per-node destructors
    Destroyer<Self, IsFlatMap && std::is_trivially_destructible<Node>::value>{}.nodesDoNotDeallocate(*this);
    free(mKeyVals);
  }
1994 
  // Resets the map to the empty, non-allocated state. mKeyVals and mInfo are
  // pointed at mMask (which is set to 0) so that lookups on an empty map read
  // a zero info byte and terminate immediately, with no heap allocation.
  void init() noexcept
  {
    mKeyVals               = reinterpret_cast<Node *>(&mMask);
    mInfo                  = reinterpret_cast<uint8_t *>(&mMask);
    mNumElements           = 0;
    mMask                  = 0;
    mMaxNumElementsAllowed = 0;
    mInfoInc               = InitialInfoInc;
    mInfoHashShift         = InitialInfoHashShift;
  }
2005 
  // members are sorted so no padding occurs
  // Empty-map trick: mKeyVals/mInfo initially alias &mMask (value 0), so an
  // empty map needs no heap allocation; see init() and destroy().
  Node *mKeyVals                = reinterpret_cast<Node *>(&mMask);    // 8 byte  8
  uint8_t *mInfo                = reinterpret_cast<uint8_t *>(&mMask); // 8 byte 16
  size_t mNumElements           = 0;                                   // 8 byte 24
  size_t mMask                  = 0;                                   // 8 byte 32
  size_t mMaxNumElementsAllowed = 0;                                   // 8 byte 40
  InfoType mInfoInc             = InitialInfoInc;                      // 4 byte 44
  InfoType mInfoHashShift       = InitialInfoHashShift;                // 4 byte 48
                                                                       // 16 byte 56 if NodeAllocator
};
2016 
2017 } // namespace detail
2018 
// Map that always stores entries directly in the bucket array (IsFlatMap =
// true). Fastest for small, cheaply movable pair types.
template <typename Key, typename T, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
          size_t MaxLoadFactor100 = 80>
using unordered_flat_map = detail::unordered_map<true, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2022 
// Map that stores entries in individually allocated nodes (IsFlatMap =
// false); suited to large or not-nothrow-movable pair types.
template <typename Key, typename T, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
          size_t MaxLoadFactor100 = 80>
using unordered_node_map = detail::unordered_map<false, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2026 
// Default map type: automatically selects flat storage when the pair is
// small (at most 6 machine words) and nothrow move-constructible/assignable,
// and node-based storage otherwise.
template <typename Key, typename T, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>,
          size_t MaxLoadFactor100 = 80>
using unordered_map = detail::unordered_map<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 &&
                                                std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value &&
                                                std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value,
                                            MaxLoadFactor100, Key, T, Hash, KeyEqual>;
2033 
2034 } // namespace robin_hood
2035 
2036 #endif