0001 /* Common base for Boost.Unordered open-addressing tables.
0002  *
0003  * Copyright 2022-2024 Joaquin M Lopez Munoz.
0004  * Copyright 2023 Christian Mazakas.
0005  * Copyright 2024 Braden Ganetsky.
0006  * Distributed under the Boost Software License, Version 1.0.
0007  * (See accompanying file LICENSE_1_0.txt or copy at
0008  * http://www.boost.org/LICENSE_1_0.txt)
0009  *
0010  * See https://www.boost.org/libs/unordered for library home page.
0011  */
0012 
0013 #ifndef BOOST_UNORDERED_DETAIL_FOA_CORE_HPP
0014 #define BOOST_UNORDERED_DETAIL_FOA_CORE_HPP
0015 
0016 #include <boost/assert.hpp>
0017 #include <boost/config.hpp>
0018 #include <boost/config/workaround.hpp>
0019 #include <boost/core/allocator_traits.hpp>
0020 #include <boost/core/bit.hpp>
0021 #include <boost/core/empty_value.hpp>
0022 #include <boost/core/no_exceptions_support.hpp>
0023 #include <boost/core/pointer_traits.hpp>
0024 #include <boost/cstdint.hpp>
0025 #include <boost/predef.h>
0026 #include <boost/unordered/detail/allocator_constructed.hpp>
0027 #include <boost/unordered/detail/narrow_cast.hpp>
0028 #include <boost/unordered/detail/mulx.hpp>
0029 #include <boost/unordered/detail/static_assert.hpp>
0030 #include <boost/unordered/detail/type_traits.hpp>
0031 #include <boost/unordered/hash_traits.hpp>
0032 #include <boost/unordered/unordered_printers.hpp>
0033 #include <climits>
0034 #include <cmath>
0035 #include <cstddef>
0036 #include <cstring>
0037 #include <limits>
0038 #include <memory>
0039 #include <new>
0040 #include <tuple>
0041 #include <type_traits>
0042 #include <utility>
0043 
0044 #if defined(BOOST_UNORDERED_ENABLE_STATS)
0045 #include <boost/unordered/detail/foa/cumulative_stats.hpp>
0046 #endif
0047 
0048 #if !defined(BOOST_UNORDERED_DISABLE_SSE2)
0049 #if defined(BOOST_UNORDERED_ENABLE_SSE2)|| \
0050     defined(__SSE2__)|| \
0051     defined(_M_X64)||(defined(_M_IX86_FP)&&_M_IX86_FP>=2)
0052 #define BOOST_UNORDERED_SSE2
0053 #endif
0054 #endif
0055 
0056 #if !defined(BOOST_UNORDERED_DISABLE_NEON)
0057 #if defined(BOOST_UNORDERED_ENABLE_NEON)||\
0058     (defined(__ARM_NEON)&&!defined(__ARM_BIG_ENDIAN))
0059 #define BOOST_UNORDERED_LITTLE_ENDIAN_NEON
0060 #endif
0061 #endif
0062 
0063 #if defined(BOOST_UNORDERED_SSE2)
0064 #include <emmintrin.h>
0065 #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON)
0066 #include <arm_neon.h>
0067 #endif
0068 
0069 #ifdef __has_builtin
0070 #define BOOST_UNORDERED_HAS_BUILTIN(x) __has_builtin(x)
0071 #else
0072 #define BOOST_UNORDERED_HAS_BUILTIN(x) 0
0073 #endif
0074 
0075 #if !defined(NDEBUG)
0076 #define BOOST_UNORDERED_ASSUME(cond) BOOST_ASSERT(cond)
0077 #elif BOOST_UNORDERED_HAS_BUILTIN(__builtin_assume)
0078 #define BOOST_UNORDERED_ASSUME(cond) __builtin_assume(cond)
0079 #elif defined(__GNUC__) || BOOST_UNORDERED_HAS_BUILTIN(__builtin_unreachable)
0080 #define BOOST_UNORDERED_ASSUME(cond)    \
0081   do{                                   \
0082     if(!(cond))__builtin_unreachable(); \
0083   }while(0)
0084 #elif defined(_MSC_VER)
0085 #define BOOST_UNORDERED_ASSUME(cond) __assume(cond)
0086 #else
0087 #define BOOST_UNORDERED_ASSUME(cond)  \
0088   do{                                 \
0089     static_cast<void>(false&&(cond)); \
0090   }while(0)
0091 #endif
0092 
0093 /* We use BOOST_UNORDERED_PREFETCH[_ELEMENTS] macros rather than proper
0094  * functions because of https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109985
0095  */
0096 
0097 #if defined(BOOST_GCC)||defined(BOOST_CLANG)
0098 #define BOOST_UNORDERED_PREFETCH(p) __builtin_prefetch((const char*)(p))
0099 #elif defined(BOOST_UNORDERED_SSE2)
0100 #define BOOST_UNORDERED_PREFETCH(p) _mm_prefetch((const char*)(p),_MM_HINT_T0)
0101 #else
0102 #define BOOST_UNORDERED_PREFETCH(p) ((void)(p))
0103 #endif
0104 
0105 /* We have experimentally confirmed that ARM architectures get a higher
0106  * speedup when around the first half of the element slots in a group are
0107  * prefetched, whereas for Intel just the first cache line is best.
0108  * Please report back if you find better tunings for some particular
0109  * architectures.
0110  */
0111 
0112 #if BOOST_ARCH_ARM
0113 /* Cache line size can't be known at compile time, so we settle on
0114  * the very common value of 64B.
0115  */
0116 
0117 #define BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N)                          \
0118   do{                                                                   \
0119     auto           BOOST_UNORDERED_P=(p);                               \
0120     constexpr int  cache_line=64;                                       \
0121     const char    *p0=reinterpret_cast<const char*>(BOOST_UNORDERED_P), \
0122                   *p1=p0+sizeof(*BOOST_UNORDERED_P)*(N)/2;              \
0123     for(;p0<p1;p0+=cache_line)BOOST_UNORDERED_PREFETCH(p0);             \
0124   }while(0)
0125 #else
0126 #define BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N) BOOST_UNORDERED_PREFETCH(p)
0127 #endif
0128 
0129 #ifdef __has_feature
0130 #define BOOST_UNORDERED_HAS_FEATURE(x) __has_feature(x)
0131 #else
0132 #define BOOST_UNORDERED_HAS_FEATURE(x) 0
0133 #endif
0134 
0135 #if BOOST_UNORDERED_HAS_FEATURE(thread_sanitizer)|| \
0136     defined(__SANITIZE_THREAD__)
0137 #define BOOST_UNORDERED_THREAD_SANITIZER
0138 #endif
0139 
0140 #define BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)                    \
0141   static_assert(boost::unordered::detail::is_nothrow_swappable<Hash>::value,   \
0142     "Template parameter Hash is required to be nothrow Swappable.");           \
0143   static_assert(boost::unordered::detail::is_nothrow_swappable<Pred>::value,   \
0144     "Template parameter Pred is required to be nothrow Swappable");
0145 
0146 namespace boost{
0147 namespace unordered{
0148 namespace detail{
0149 namespace foa{
0150 
0151 static constexpr std::size_t default_bucket_count=0;
0152 
0153 /* foa::table_core is the common base of foa::table and foa::concurrent_table,
0154  * which in turn serve as the foundational core of
0155  * boost::unordered_(flat|node)_(map|set) and boost::concurrent_flat_(map|set),
0156  * respectively. Its main internal design aspects are:
0157  * 
0158  *   - Element slots are logically split into groups of size N=15. The number
0159  *     of groups is always a power of two, so the number of allocated slots
0160  *     is of the form (N*2^n)-1 (final slot reserved for a sentinel mark).
0161  *   - Positioning is done at the group level rather than the slot level, that
0162  *     is, for any given element its hash value is used to locate a group and
0163  *     insertion is performed on the first available element of that group;
0164  *     if the group is full (overflow), further groups are tried using
0165  *     quadratic probing.
0166  *   - Each group has an associated 16B metadata word holding reduced hash
0167  *     values and overflow information. Reduced hash values are used to
0168  *     accelerate lookup within the group by using 128-bit SIMD or 64-bit word
0169  *     operations.
0170  */
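
/* Illustrative example (not part of the original header): with 2^3=8 groups
 * the element array holds 8*15-1=119 slots. An element whose hash maps to
 * group 5 is placed in the first free slot of group 5; if that group is full,
 * groups (5+1)%8, (5+1+2)%8, (5+1+2+3)%8, ... are probed in turn.
 */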
0171 
0172 /* group15 controls metadata information of a group of N=15 element slots.
0173  * The 16B metadata word is organized as follows (LSB depicted rightmost):
0174  *
0175  *   +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
0176  *   |ofw|h14|h13|h12|h11|h10|h09|h08|h07|h06|h05|h04|h03|h02|h01|h00|
0177  *   +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
0178  *
0179  * hi is 0 if the i-th element slot is available, 1 to mark a sentinel and,
0180  * when the slot is occupied, a value in the range [2,255] obtained from the
0181  * element's original hash value.
0182  * ofw is the so-called overflow byte. If insertion of an element with hash
0183  * value h is tried on a full group, then the (h%8)-th bit of the overflow
0184  * byte is set to 1 and a further group is probed. Having an overflow byte
0185  * brings two advantages:
0186  * 
0187  *   - There's no need to reserve a special value of hi to mark tombstone
0188  *     slots; each reduced hash value then keeps log2(254)=7.99 bits of the
0189  *     original hash (alternative approaches reserve one full bit to mark
0190  *     whether the slot is available/deleted, so their reduced hash values are
0191  *     only 7 bits strong).
0192  *   - When doing an unsuccessful lookup (i.e. the element is not present in
0193  *     the table), probing stops at the first non-overflowed group. Having 8
0194  *     bits for signalling overflow makes it very likely that we stop at the
0195  *     current group (this happens when no element with the same (h%8) value
0196  *     has overflowed in the group), saving us an additional group check even
0197  *     under high-load/high-erase conditions. It is critical that hash
0198  *     reduction is invariant under modulo 8 (see maybe_caused_overflow).
0199  *
0200  * When looking for an element with hash value h, match(h) returns a bitmask
0201  * signalling which slots have the same reduced hash value. If available,
0202  * match uses SSE2 or (little endian) Neon 128-bit SIMD operations. In non-SIMD
0203  * scenarios, the logical layout described above is physically mapped to two
0204  * 64-bit words with *bit interleaving*, i.e. the least significant 16 bits of
0205  * the first 64-bit word contain the least significant bits of each byte in the
0206  * "logical" 128-bit word, and so forth. With this layout, match can be
0207  * implemented with 4 ANDs, 3 shifts, 2 XORs, 1 OR and 1 NOT.
0208  * 
0209  * IntegralWrapper<Integral> is used to implement group15's underlying
0210  * metadata: it behaves as a plain integral for foa::table or introduces
0211  * atomic ops for foa::concurrent_table. If IntegralWrapper<...> is trivially
0212  * constructible, so is group15, in which case it can be initialized via memset
0213  * etc. Where needed, group15::initialize resets the metadata to the all
0214  * zeros (default state).
0215  */
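
/* Worked example (illustrative, not part of the original comment): suppose
 * slot 3 of a group holds an element whose reduced hash is 0xC5, and that an
 * element with hash h, h%8==6, once overflowed this group. Then h03==0xC5 and
 * bit 6 of ofw is set, so:
 *
 *   - match(h') has bit 3 set exactly when reduced_hash(h')==0xC5;
 *   - is_not_overflowed(h'') is false only when h''%8==6, i.e. an unsuccessful
 *     lookup whose hash is not congruent with 6 modulo 8 stops at this group.
 */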
0216 
0217 #if defined(BOOST_UNORDERED_SSE2)
0218 
0219 template<template<typename> class IntegralWrapper>
0220 struct group15
0221 {
0222   static constexpr std::size_t N=15;
0223   static constexpr bool        regular_layout=true;
0224 
0225   struct dummy_group_type
0226   {
0227     alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
0228   };
0229 
0230   inline void initialize()
0231   {
0232     _mm_store_si128(
0233       reinterpret_cast<__m128i*>(m),_mm_setzero_si128());
0234   }
0235 
0236   inline void set(std::size_t pos,std::size_t hash)
0237   {
0238     BOOST_ASSERT(pos<N);
0239     at(pos)=reduced_hash(hash);
0240   }
0241 
0242   inline void set_sentinel()
0243   {
0244     at(N-1)=sentinel_;
0245   }
0246 
0247   inline bool is_sentinel(std::size_t pos)const
0248   {
0249     BOOST_ASSERT(pos<N);
0250     return at(pos)==sentinel_;
0251   }
0252 
0253   static inline bool is_sentinel(unsigned char* pc)noexcept
0254   {
0255     return *pc==sentinel_;
0256   }
0257 
0258   inline void reset(std::size_t pos)
0259   {
0260     BOOST_ASSERT(pos<N);
0261     at(pos)=available_;
0262   }
0263 
0264   static inline void reset(unsigned char* pc)
0265   {
0266     *reinterpret_cast<slot_type*>(pc)=available_;
0267   }
0268 
0269   inline int match(std::size_t hash)const
0270   {
0271     return _mm_movemask_epi8(
0272       _mm_cmpeq_epi8(load_metadata(),_mm_set1_epi32(match_word(hash))))&0x7FFF;
0273   }
0274 
0275   inline bool is_not_overflowed(std::size_t hash)const
0276   {
0277     static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};
0278 
0279     return !(overflow()&shift[hash%8]);
0280   }
0281 
0282   inline void mark_overflow(std::size_t hash)
0283   {
0284     overflow()|=static_cast<unsigned char>(1<<(hash%8));
0285   }
0286 
0287   static inline bool maybe_caused_overflow(unsigned char* pc)
0288   {
0289     std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
0290     group15    *pg=reinterpret_cast<group15*>(pc-pos);
0291     return !pg->is_not_overflowed(*pc);
0292   }
0293 
0294   inline int match_available()const
0295   {
0296     return _mm_movemask_epi8(
0297       _mm_cmpeq_epi8(load_metadata(),_mm_setzero_si128()))&0x7FFF;
0298   }
0299 
0300   inline bool is_occupied(std::size_t pos)const
0301   {
0302     BOOST_ASSERT(pos<N);
0303     return at(pos)!=available_;
0304   }
0305 
0306   static inline bool is_occupied(unsigned char* pc)noexcept
0307   {
0308     return *reinterpret_cast<slot_type*>(pc)!=available_;
0309   }
0310 
0311   inline int match_occupied()const
0312   {
0313     return (~match_available())&0x7FFF;
0314   }
0315 
0316 private:
0317   using slot_type=IntegralWrapper<unsigned char>;
0318   BOOST_UNORDERED_STATIC_ASSERT(sizeof(slot_type)==1);
0319 
0320   static constexpr unsigned char available_=0,
0321                                  sentinel_=1;
0322 
0323   inline __m128i load_metadata()const
0324   {
0325 #if defined(BOOST_UNORDERED_THREAD_SANITIZER)
0326     /* ThreadSanitizer complains on 1-byte atomic writes combined with
0327      * 16-byte atomic reads.
0328      */
0329 
0330     return _mm_set_epi8(
0331       (char)m[15],(char)m[14],(char)m[13],(char)m[12],
0332       (char)m[11],(char)m[10],(char)m[ 9],(char)m[ 8],
0333       (char)m[ 7],(char)m[ 6],(char)m[ 5],(char)m[ 4],
0334       (char)m[ 3],(char)m[ 2],(char)m[ 1],(char)m[ 0]);
0335 #else
0336     return _mm_load_si128(reinterpret_cast<const __m128i*>(m));
0337 #endif
0338   }
0339 
0340   inline static int match_word(std::size_t hash)
0341   {
0342     static constexpr boost::uint32_t word[]=
0343     {
0344       0x08080808u,0x09090909u,0x02020202u,0x03030303u,0x04040404u,0x05050505u,
0345       0x06060606u,0x07070707u,0x08080808u,0x09090909u,0x0A0A0A0Au,0x0B0B0B0Bu,
0346       0x0C0C0C0Cu,0x0D0D0D0Du,0x0E0E0E0Eu,0x0F0F0F0Fu,0x10101010u,0x11111111u,
0347       0x12121212u,0x13131313u,0x14141414u,0x15151515u,0x16161616u,0x17171717u,
0348       0x18181818u,0x19191919u,0x1A1A1A1Au,0x1B1B1B1Bu,0x1C1C1C1Cu,0x1D1D1D1Du,
0349       0x1E1E1E1Eu,0x1F1F1F1Fu,0x20202020u,0x21212121u,0x22222222u,0x23232323u,
0350       0x24242424u,0x25252525u,0x26262626u,0x27272727u,0x28282828u,0x29292929u,
0351       0x2A2A2A2Au,0x2B2B2B2Bu,0x2C2C2C2Cu,0x2D2D2D2Du,0x2E2E2E2Eu,0x2F2F2F2Fu,
0352       0x30303030u,0x31313131u,0x32323232u,0x33333333u,0x34343434u,0x35353535u,
0353       0x36363636u,0x37373737u,0x38383838u,0x39393939u,0x3A3A3A3Au,0x3B3B3B3Bu,
0354       0x3C3C3C3Cu,0x3D3D3D3Du,0x3E3E3E3Eu,0x3F3F3F3Fu,0x40404040u,0x41414141u,
0355       0x42424242u,0x43434343u,0x44444444u,0x45454545u,0x46464646u,0x47474747u,
0356       0x48484848u,0x49494949u,0x4A4A4A4Au,0x4B4B4B4Bu,0x4C4C4C4Cu,0x4D4D4D4Du,
0357       0x4E4E4E4Eu,0x4F4F4F4Fu,0x50505050u,0x51515151u,0x52525252u,0x53535353u,
0358       0x54545454u,0x55555555u,0x56565656u,0x57575757u,0x58585858u,0x59595959u,
0359       0x5A5A5A5Au,0x5B5B5B5Bu,0x5C5C5C5Cu,0x5D5D5D5Du,0x5E5E5E5Eu,0x5F5F5F5Fu,
0360       0x60606060u,0x61616161u,0x62626262u,0x63636363u,0x64646464u,0x65656565u,
0361       0x66666666u,0x67676767u,0x68686868u,0x69696969u,0x6A6A6A6Au,0x6B6B6B6Bu,
0362       0x6C6C6C6Cu,0x6D6D6D6Du,0x6E6E6E6Eu,0x6F6F6F6Fu,0x70707070u,0x71717171u,
0363       0x72727272u,0x73737373u,0x74747474u,0x75757575u,0x76767676u,0x77777777u,
0364       0x78787878u,0x79797979u,0x7A7A7A7Au,0x7B7B7B7Bu,0x7C7C7C7Cu,0x7D7D7D7Du,
0365       0x7E7E7E7Eu,0x7F7F7F7Fu,0x80808080u,0x81818181u,0x82828282u,0x83838383u,
0366       0x84848484u,0x85858585u,0x86868686u,0x87878787u,0x88888888u,0x89898989u,
0367       0x8A8A8A8Au,0x8B8B8B8Bu,0x8C8C8C8Cu,0x8D8D8D8Du,0x8E8E8E8Eu,0x8F8F8F8Fu,
0368       0x90909090u,0x91919191u,0x92929292u,0x93939393u,0x94949494u,0x95959595u,
0369       0x96969696u,0x97979797u,0x98989898u,0x99999999u,0x9A9A9A9Au,0x9B9B9B9Bu,
0370       0x9C9C9C9Cu,0x9D9D9D9Du,0x9E9E9E9Eu,0x9F9F9F9Fu,0xA0A0A0A0u,0xA1A1A1A1u,
0371       0xA2A2A2A2u,0xA3A3A3A3u,0xA4A4A4A4u,0xA5A5A5A5u,0xA6A6A6A6u,0xA7A7A7A7u,
0372       0xA8A8A8A8u,0xA9A9A9A9u,0xAAAAAAAAu,0xABABABABu,0xACACACACu,0xADADADADu,
0373       0xAEAEAEAEu,0xAFAFAFAFu,0xB0B0B0B0u,0xB1B1B1B1u,0xB2B2B2B2u,0xB3B3B3B3u,
0374       0xB4B4B4B4u,0xB5B5B5B5u,0xB6B6B6B6u,0xB7B7B7B7u,0xB8B8B8B8u,0xB9B9B9B9u,
0375       0xBABABABAu,0xBBBBBBBBu,0xBCBCBCBCu,0xBDBDBDBDu,0xBEBEBEBEu,0xBFBFBFBFu,
0376       0xC0C0C0C0u,0xC1C1C1C1u,0xC2C2C2C2u,0xC3C3C3C3u,0xC4C4C4C4u,0xC5C5C5C5u,
0377       0xC6C6C6C6u,0xC7C7C7C7u,0xC8C8C8C8u,0xC9C9C9C9u,0xCACACACAu,0xCBCBCBCBu,
0378       0xCCCCCCCCu,0xCDCDCDCDu,0xCECECECEu,0xCFCFCFCFu,0xD0D0D0D0u,0xD1D1D1D1u,
0379       0xD2D2D2D2u,0xD3D3D3D3u,0xD4D4D4D4u,0xD5D5D5D5u,0xD6D6D6D6u,0xD7D7D7D7u,
0380       0xD8D8D8D8u,0xD9D9D9D9u,0xDADADADAu,0xDBDBDBDBu,0xDCDCDCDCu,0xDDDDDDDDu,
0381       0xDEDEDEDEu,0xDFDFDFDFu,0xE0E0E0E0u,0xE1E1E1E1u,0xE2E2E2E2u,0xE3E3E3E3u,
0382       0xE4E4E4E4u,0xE5E5E5E5u,0xE6E6E6E6u,0xE7E7E7E7u,0xE8E8E8E8u,0xE9E9E9E9u,
0383       0xEAEAEAEAu,0xEBEBEBEBu,0xECECECECu,0xEDEDEDEDu,0xEEEEEEEEu,0xEFEFEFEFu,
0384       0xF0F0F0F0u,0xF1F1F1F1u,0xF2F2F2F2u,0xF3F3F3F3u,0xF4F4F4F4u,0xF5F5F5F5u,
0385       0xF6F6F6F6u,0xF7F7F7F7u,0xF8F8F8F8u,0xF9F9F9F9u,0xFAFAFAFAu,0xFBFBFBFBu,
0386       0xFCFCFCFCu,0xFDFDFDFDu,0xFEFEFEFEu,0xFFFFFFFFu,
0387     };
0388 
0389     return (int)word[narrow_cast<unsigned char>(hash)];
0390   }
0391 
0392   inline static unsigned char reduced_hash(std::size_t hash)
0393   {
0394     return narrow_cast<unsigned char>(match_word(hash));
0395   }
0396 
0397   inline slot_type& at(std::size_t pos)
0398   {
0399     return m[pos];
0400   }
0401 
0402   inline const slot_type& at(std::size_t pos)const
0403   {
0404     return m[pos];
0405   }
0406 
0407   inline slot_type& overflow()
0408   {
0409     return at(N);
0410   }
0411 
0412   inline const slot_type& overflow()const
0413   {
0414     return at(N);
0415   }
0416 
0417   alignas(16) slot_type m[16];
0418 };
0419 
0420 #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON)
0421 
0422 template<template<typename> class IntegralWrapper>
0423 struct group15
0424 {
0425   static constexpr std::size_t N=15;
0426   static constexpr bool        regular_layout=true;
0427 
0428   struct dummy_group_type
0429   {
0430     alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
0431   };
0432 
0433   inline void initialize()
0434   {
0435     vst1q_u8(reinterpret_cast<uint8_t*>(m),vdupq_n_u8(0));
0436   }
0437 
0438   inline void set(std::size_t pos,std::size_t hash)
0439   {
0440     BOOST_ASSERT(pos<N);
0441     at(pos)=reduced_hash(hash);
0442   }
0443 
0444   inline void set_sentinel()
0445   {
0446     at(N-1)=sentinel_;
0447   }
0448 
0449   inline bool is_sentinel(std::size_t pos)const
0450   {
0451     BOOST_ASSERT(pos<N);
0452     return pos==N-1&&at(N-1)==sentinel_;
0453   }
0454 
0455   static inline bool is_sentinel(unsigned char* pc)noexcept
0456   {
0457     return *reinterpret_cast<slot_type*>(pc)==sentinel_;
0458   }
0459 
0460   inline void reset(std::size_t pos)
0461   {
0462     BOOST_ASSERT(pos<N);
0463     at(pos)=available_;
0464   }
0465 
0466   static inline void reset(unsigned char* pc)
0467   {
0468     *reinterpret_cast<slot_type*>(pc)=available_;
0469   }
0470 
0471   inline int match(std::size_t hash)const
0472   {
0473     return simde_mm_movemask_epi8(vceqq_u8(
0474       load_metadata(),vdupq_n_u8(reduced_hash(hash))))&0x7FFF;
0475   }
0476 
0477   inline bool is_not_overflowed(std::size_t hash)const
0478   {
0479     static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};
0480 
0481     return !(overflow()&shift[hash%8]);
0482   }
0483 
0484   inline void mark_overflow(std::size_t hash)
0485   {
0486     overflow()|=static_cast<unsigned char>(1<<(hash%8));
0487   }
0488 
0489   static inline bool maybe_caused_overflow(unsigned char* pc)
0490   {
0491     std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
0492     group15    *pg=reinterpret_cast<group15*>(pc-pos);
0493     return !pg->is_not_overflowed(*pc);
0494   };
0495 
0496   inline int match_available()const
0497   {
0498     return simde_mm_movemask_epi8(vceqq_u8(
0499       load_metadata(),vdupq_n_u8(0)))&0x7FFF;
0500   }
0501 
0502   inline bool is_occupied(std::size_t pos)const
0503   {
0504     BOOST_ASSERT(pos<N);
0505     return at(pos)!=available_;
0506   }
0507 
0508   static inline bool is_occupied(unsigned char* pc)noexcept
0509   {
0510     return *reinterpret_cast<slot_type*>(pc)!=available_;
0511   }
0512 
0513   inline int match_occupied()const
0514   {
0515     return simde_mm_movemask_epi8(vcgtq_u8(
0516       load_metadata(),vdupq_n_u8(0)))&0x7FFF;
0517   }
0518 
0519 private:
0520   using slot_type=IntegralWrapper<unsigned char>;
0521   BOOST_UNORDERED_STATIC_ASSERT(sizeof(slot_type)==1);
0522 
0523   static constexpr unsigned char available_=0,
0524                                  sentinel_=1;
0525 
0526   inline uint8x16_t load_metadata()const
0527   {
0528 #if defined(BOOST_UNORDERED_THREAD_SANITIZER)
0529     /* ThreadSanitizer complains on 1-byte atomic writes combined with
0530      * 16-byte atomic reads.
0531      */
0532 
0533     alignas(16) uint8_t data[16]={
0534       m[ 0],m[ 1],m[ 2],m[ 3],m[ 4],m[ 5],m[ 6],m[ 7],
0535       m[ 8],m[ 9],m[10],m[11],m[12],m[13],m[14],m[15]};
0536     return vld1q_u8(data);
0537 #else
0538     return vld1q_u8(reinterpret_cast<const uint8_t*>(m));
0539 #endif
0540   }
0541 
0542   inline static unsigned char reduced_hash(std::size_t hash)
0543   {
0544     static constexpr unsigned char table[]={
0545       8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
0546       16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
0547       32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
0548       48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
0549       64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
0550       80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
0551       96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
0552       112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
0553       128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
0554       144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
0555       160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
0556       176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
0557       192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
0558       208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
0559       224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
0560       240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
0561     };
0562     
0563     return table[(unsigned char)hash];
0564   }
0565 
0566   /* Copied from 
0567    * https://github.com/simd-everywhere/simde/blob/master/simde/x86/
0568    * sse2.h#L3763
0569    */
0570 
0571   static inline int simde_mm_movemask_epi8(uint8x16_t a)
0572   {
0573     static constexpr uint8_t md[16]={
0574       1 << 0, 1 << 1, 1 << 2, 1 << 3,
0575       1 << 4, 1 << 5, 1 << 6, 1 << 7,
0576       1 << 0, 1 << 1, 1 << 2, 1 << 3,
0577       1 << 4, 1 << 5, 1 << 6, 1 << 7,
0578     };
0579 
0580     uint8x16_t  masked=vandq_u8(vld1q_u8(md),a);
0581     uint8x8x2_t tmp=vzip_u8(vget_low_u8(masked),vget_high_u8(masked));
0582     uint16x8_t  x=vreinterpretq_u16_u8(vcombine_u8(tmp.val[0],tmp.val[1]));
0583 
0584 #if defined(__ARM_ARCH_ISA_A64)
0585     return vaddvq_u16(x);
0586 #else
0587     uint64x2_t t64=vpaddlq_u32(vpaddlq_u16(x));
0588     return int(vgetq_lane_u64(t64,0))+int(vgetq_lane_u64(t64,1));
0589 #endif
0590   }
0591 
0592   inline slot_type& at(std::size_t pos)
0593   {
0594     return m[pos];
0595   }
0596 
0597   inline const slot_type& at(std::size_t pos)const
0598   {
0599     return m[pos];
0600   }
0601 
0602   inline slot_type& overflow()
0603   {
0604     return at(N);
0605   }
0606 
0607   inline const slot_type& overflow()const
0608   {
0609     return at(N);
0610   }
0611 
0612   alignas(16) slot_type m[16];
0613 };
0614 
0615 #else /* non-SIMD */
0616 
0617 template<template<typename> class IntegralWrapper>
0618 struct group15
0619 {
0620   static constexpr std::size_t N=15;
0621   static constexpr bool        regular_layout=false;
0622 
0623   struct dummy_group_type
0624   {
0625     alignas(16) boost::uint64_t m[2]=
0626       {0x0000000000004000ull,0x0000000000000000ull};
0627   };
0628 
0629   inline void initialize(){m[0]=0;m[1]=0;}
0630 
0631   inline void set(std::size_t pos,std::size_t hash)
0632   {
0633     BOOST_ASSERT(pos<N);
0634     set_impl(pos,reduced_hash(hash));
0635   }
0636 
0637   inline void set_sentinel()
0638   {
0639     set_impl(N-1,sentinel_);
0640   }
0641 
0642   inline bool is_sentinel(std::size_t pos)const
0643   {
0644     BOOST_ASSERT(pos<N);
0645     return 
0646       pos==N-1&&
0647       (m[0] & boost::uint64_t(0x4000400040004000ull))==
0648         boost::uint64_t(0x4000ull)&&
0649       (m[1] & boost::uint64_t(0x4000400040004000ull))==0;
0650   }
0651 
0652   inline void reset(std::size_t pos)
0653   {
0654     BOOST_ASSERT(pos<N);
0655     set_impl(pos,available_);
0656   }
0657 
0658   static inline void reset(unsigned char* pc)
0659   {
0660     std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
0661     pc-=pos;
0662     reinterpret_cast<group15*>(pc)->reset(pos);
0663   }
0664 
0665   inline int match(std::size_t hash)const
0666   {
0667     return match_impl(reduced_hash(hash));
0668   }
0669 
0670   inline bool is_not_overflowed(std::size_t hash)const
0671   {
0672     return !(reinterpret_cast<const boost::uint16_t*>(m)[hash%8] & 0x8000u);
0673   }
0674 
0675   inline void mark_overflow(std::size_t hash)
0676   {
0677     reinterpret_cast<boost::uint16_t*>(m)[hash%8]|=0x8000u;
0678   }
0679 
0680   static inline bool maybe_caused_overflow(unsigned char* pc)
0681   {
0682     std::size_t     pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
0683     group15        *pg=reinterpret_cast<group15*>(pc-pos);
0684     boost::uint64_t x=((pg->m[0])>>pos)&0x000100010001ull;
0685     boost::uint32_t y=narrow_cast<boost::uint32_t>(x|(x>>15)|(x>>30));
0686     return !pg->is_not_overflowed(y);
0687   };
0688 
0689   inline int match_available()const
0690   {
0691     boost::uint64_t x=~(m[0]|m[1]);
0692     boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
0693     y&=y>>16;
0694     return y&0x7FFF;
0695   }
0696 
0697   inline bool is_occupied(std::size_t pos)const
0698   {
0699     BOOST_ASSERT(pos<N);
0700     boost::uint64_t x=m[0]|m[1];
0701     return (x&(0x0001000100010001ull<<pos))!=0;
0702   }
0703 
0704   inline int match_occupied()const
0705   {
0706     boost::uint64_t x=m[0]|m[1];
0707     boost::uint32_t y=narrow_cast<boost::uint32_t>(x|(x>>32));
0708     y|=y>>16;
0709     return y&0x7FFF;
0710   }
0711 
0712 private:
0713   using word_type=IntegralWrapper<uint64_t>;
0714   BOOST_UNORDERED_STATIC_ASSERT(sizeof(word_type)==8);
0715 
0716   static constexpr unsigned char available_=0,
0717                                  sentinel_=1;
0718 
0719   inline static unsigned char reduced_hash(std::size_t hash)
0720   {
0721     static constexpr unsigned char table[]={
0722       8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
0723       16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
0724       32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
0725       48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
0726       64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
0727       80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
0728       96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
0729       112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
0730       128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
0731       144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
0732       160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
0733       176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
0734       192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
0735       208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
0736       224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
0737       240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
0738     };
0739     
0740     return table[narrow_cast<unsigned char>(hash)];
0741   }
0742 
0743   inline void set_impl(std::size_t pos,std::size_t n)
0744   {
0745     BOOST_ASSERT(n<256);
0746     set_impl(m[0],pos,n&0xFu);
0747     set_impl(m[1],pos,n>>4);
0748   }
0749 
0750   static inline void set_impl(word_type& x,std::size_t pos,std::size_t n)
0751   {
0752     static constexpr boost::uint64_t mask[]=
0753     {
0754       0x0000000000000000ull,0x0000000000000001ull,0x0000000000010000ull,
0755       0x0000000000010001ull,0x0000000100000000ull,0x0000000100000001ull,
0756       0x0000000100010000ull,0x0000000100010001ull,0x0001000000000000ull,
0757       0x0001000000000001ull,0x0001000000010000ull,0x0001000000010001ull,
0758       0x0001000100000000ull,0x0001000100000001ull,0x0001000100010000ull,
0759       0x0001000100010001ull,
0760     };
0761     static constexpr boost::uint64_t imask[]=
0762     {
0763       0x0001000100010001ull,0x0001000100010000ull,0x0001000100000001ull,
0764       0x0001000100000000ull,0x0001000000010001ull,0x0001000000010000ull,
0765       0x0001000000000001ull,0x0001000000000000ull,0x0000000100010001ull,
0766       0x0000000100010000ull,0x0000000100000001ull,0x0000000100000000ull,
0767       0x0000000000010001ull,0x0000000000010000ull,0x0000000000000001ull,
0768       0x0000000000000000ull,
0769     };
0770 
0771     BOOST_ASSERT(pos<16&&n<16);
0772     x|=   mask[n]<<pos;
0773     x&=~(imask[n]<<pos);
0774   }
0775 
0776   inline int match_impl(std::size_t n)const
0777   {
0778     static constexpr boost::uint64_t mask[]=
0779     {
0780       0x0000000000000000ull,0x000000000000ffffull,0x00000000ffff0000ull,
0781       0x00000000ffffffffull,0x0000ffff00000000ull,0x0000ffff0000ffffull,
0782       0x0000ffffffff0000ull,0x0000ffffffffffffull,0xffff000000000000ull,
0783       0xffff00000000ffffull,0xffff0000ffff0000ull,0xffff0000ffffffffull,
0784       0xffffffff00000000ull,0xffffffff0000ffffull,0xffffffffffff0000ull,
0785       0xffffffffffffffffull,
0786     };
0787 
0788     BOOST_ASSERT(n<256);
0789     boost::uint64_t x=m[0]^mask[n&0xFu];
0790                     x=~((m[1]^mask[n>>4])|x);
0791     boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
0792                     y&=y>>16;
0793     return          y&0x7FFF;
0794   }
0795 
0796   alignas(16) word_type m[2];
0797 };
0798 
0799 #endif
0800 
0801 /* foa::table_core uses a size policy to obtain the permissible sizes of the
0802  * group array (and, by implication, the element array) and to do the
0803  * hash->group mapping.
0804  * 
0805  *   - size_index(n) returns an unspecified "index" number used in other policy
0806  *     operations.
0807  *   - size(size_index_) returns the number of groups for the given index. It
0808  *     is guaranteed that size(size_index(n)) >= n.
0809  *   - min_size() is the minimum number of groups permissible, i.e.
0810  *     size(size_index(0)).
0811  *   - position(hash,size_index_) maps hash to a position in the range
0812  *     [0,size(size_index_)).
0813  * 
0814  * The reason we're introducing the intermediate index value for calculating
0815  * sizes and positions is that it allows us to optimize the implementation of
0816  * position, which is in the hot path of lookup and insertion operations:
0817  * pow2_size_policy, the actual size policy used by foa::table, returns 2^n
0818  * (n>0) as permissible sizes and returns the n most significant bits
0819  * of the hash value as the position in the group array; using a size index
0820  * defined as i = (bits in std::size_t) - n, we have an unbeatable
0821  * implementation of position(hash) as hash>>i.
0822  * There's a twofold reason for choosing the high bits of hash for positioning:
0823  *   - Multiplication-based mixing tends to yield better entropy in the high
0824  *     part of its result.
0825  *   - group15 reduced-hash values take the *low* bits of hash, and we want
0826  *     these values and positioning to be as uncorrelated as possible.
0827  */
0828 
0829 struct pow2_size_policy
0830 {
0831   static inline std::size_t size_index(std::size_t n)
0832   {
0833     // TODO: min size is 2, see if we can bring it down to 1 without loss
0834     // of performance
0835 
0836     return sizeof(std::size_t)*CHAR_BIT-
0837       (n<=2?1:((std::size_t)(boost::core::bit_width(n-1))));
0838   }
0839 
0840   static inline std::size_t size(std::size_t size_index_)
0841   {
0842      return std::size_t(1)<<(sizeof(std::size_t)*CHAR_BIT-size_index_);  
0843   }
0844     
0845   static constexpr std::size_t min_size(){return 2;}
0846 
0847   static inline std::size_t position(std::size_t hash,std::size_t size_index_)
0848   {
0849     return hash>>size_index_;
0850   }
0851 };
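
/* Worked example (illustrative, assuming a 64-bit std::size_t): requesting
 * room for n=5 groups gives size_index(5)=64-bit_width(4)=61, so
 * size(61)=2^(64-61)=8 groups and position(hash,61)==hash>>61, i.e. the top
 * 3 bits of the (mixed) hash value select the group.
 */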
0852 
0853 /* size index of a group array for a given *element* capacity */
0854 
0855 template<typename Group,typename SizePolicy>
0856 static inline std::size_t size_index_for(std::size_t n)
0857 {
0858   /* n/N+1 == ceil((n+1)/N) (extra +1 for the sentinel) */
0859   return SizePolicy::size_index(n/Group::N+1);
0860 }
0861 
0862 /* Quadratic prober over a power-of-two range using triangular numbers.
0863  * mask in next(mask) must be the range size minus one (and since size is 2^n,
0864  * mask has exactly its lowest n bits set to 1).
0865  */
0866 
0867 struct pow2_quadratic_prober
0868 {
0869   pow2_quadratic_prober(std::size_t pos_):pos{pos_}{}
0870 
0871   inline std::size_t get()const{return pos;}
0872   inline std::size_t length()const{return step+1;}
0873 
0874   /* next returns false when the whole array has been traversed, which ends
0875    * probing (in practice, full-table probing will only happen with very small
0876    * arrays).
0877    */
0878 
0879   inline bool next(std::size_t mask)
0880   {
0881     step+=1;
0882     pos=(pos+step)&mask;
0883     return step<=mask;
0884   }
0885 
0886 private:
0887   std::size_t pos,step=0;
0888 };
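
/* Worked example (illustrative): over a range of 8 groups (mask=7), a prober
 * constructed at position 2 visits 2,3,5,0,4,1,7,6 (each group exactly once)
 * before next(7) returns false:
 *
 *   pow2_quadratic_prober pb{2};
 *   do{
 *     std::size_t g=pb.get(); // probe group g
 *   }while(pb.next(7));
 */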
0889 
0890 /* Mixing policies: no_mix is the identity function, and mulx_mix
0891  * uses the mulx function from <boost/unordered/detail/mulx.hpp>.
0892  *
0893  * foa::table_core mixes hash results with mulx_mix unless the hash is marked
0894  * as avalanching, i.e. of good quality
0895  * (see <boost/unordered/hash_traits.hpp>).
0896  */
0897 
0898 struct no_mix
0899 {
0900   template<typename Hash,typename T>
0901   static inline std::size_t mix(const Hash& h,const T& x)
0902   {
0903     return h(x);
0904   }
0905 };
0906 
0907 struct mulx_mix
0908 {
0909   template<typename Hash,typename T>
0910   static inline std::size_t mix(const Hash& h,const T& x)
0911   {
0912     return mulx(h(x));
0913   }
0914 };
0915 
0916 /* boost::core::countr_zero has a potentially costly check for
0917  * the case x==0.
0918  */
0919 
0920 inline unsigned int unchecked_countr_zero(int x)
0921 {
0922 #if defined(BOOST_MSVC)
0923   unsigned long r;
0924   _BitScanForward(&r,(unsigned long)x);
0925   return (unsigned int)r;
0926 #else
0927   BOOST_UNORDERED_ASSUME(x!=0);
0928   return (unsigned int)boost::core::countr_zero((unsigned int)x);
0929 #endif
0930 }
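
/* Illustrative helper (hypothetical, not part of the original header): the
 * match bitmasks returned by group15 are typically consumed by peeling off
 * the lowest set bit with unchecked_countr_zero, as sketched below.
 */

template<typename F>
inline void for_each_set_position(int mask,F f)
{
  while(mask){
    f(unchecked_countr_zero(mask)); /* index of lowest set bit */
    mask&=mask-1;                   /* clear that bit */
  }
}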
0931 
0932 /* table_arrays controls allocation, initialization and deallocation of
0933  * paired arrays of groups and element slots. Only one chunk of memory is
0934  * allocated to place both arrays: this is not done for efficiency reasons,
0935  * but in order to be able to properly align the group array without storing
0936  * additional offset information --the alignment required (16B) is usually
0937  * greater than alignof(std::max_align_t) and thus not guaranteed by
0938  * allocators.
0939  */
0940 
0941 template<typename Group,std::size_t Size>
0942 Group* dummy_groups()
0943 {
0944   /* Dummy storage initialized as if in an empty container (actually, each
0945    * of its groups is initialized like a separate empty container).
0946    * We make table_arrays::groups point to this when capacity()==0, so that
0947    * we are not allocating any dynamic memory and yet lookup can be implemented
0948    * without checking for groups==nullptr. This space won't ever be used for
0949    * insertion as the container's capacity is precisely zero.
0950    */
0951 
0952   static constexpr typename Group::dummy_group_type
0953   storage[Size]={typename Group::dummy_group_type(),};
0954 
0955   return reinterpret_cast<Group*>(
0956     const_cast<typename Group::dummy_group_type*>(storage));
0957 }
0958 
0959 template<
0960   typename Ptr,typename Ptr2,
0961   typename std::enable_if<!std::is_same<Ptr,Ptr2>::value>::type* = nullptr
0962 >
0963 Ptr to_pointer(Ptr2 p)
0964 {
0965   if(!p){return nullptr;}
0966   return boost::pointer_traits<Ptr>::pointer_to(*p);
0967 }
0968 
0969 template<typename Ptr>
0970 Ptr to_pointer(Ptr p)
0971 {
0972   return p;
0973 }
0974 
0975 template<typename Arrays,typename Allocator>
0976 struct arrays_holder
0977 {
0978   arrays_holder(const Arrays& arrays,const Allocator& al):
0979     arrays_{arrays},al_{al}
0980   {}
0981   
0982   /* not defined but VS in pre-C++17 mode needs to see it for RVO */
0983   arrays_holder(arrays_holder const&);
0984   arrays_holder& operator=(arrays_holder const&)=delete;
0985 
0986   ~arrays_holder()
0987   {
0988     if(!released_){
0989       arrays_.delete_(typename Arrays::allocator_type(al_),arrays_);
0990     }
0991   }
0992 
0993   const Arrays& release()
0994   {
0995     released_=true;
0996     return arrays_;
0997   }
0998 
0999 private:
1000   Arrays    arrays_;
1001   Allocator al_;
1002   bool      released_=false;
1003 };
1004 
1005 template<typename Value,typename Group,typename SizePolicy,typename Allocator>
1006 struct table_arrays
1007 {
1008   using allocator_type=typename boost::allocator_rebind<Allocator,Value>::type;
1009 
1010   using value_type=Value;
1011   using group_type=Group;
1012   static constexpr auto N=group_type::N;
1013   using size_policy=SizePolicy;
1014   using value_type_pointer=
1015     typename boost::allocator_pointer<allocator_type>::type;
1016   using group_type_pointer=
1017     typename boost::pointer_traits<value_type_pointer>::template
1018       rebind<group_type>;
1019   using group_type_pointer_traits=boost::pointer_traits<group_type_pointer>;
1020 
1021   // For natvis purposes
1022   using char_pointer=
1023     typename boost::pointer_traits<value_type_pointer>::template
1024       rebind<unsigned char>;
1025 
1026   table_arrays(
1027     std::size_t gsi,std::size_t gsm,
1028     group_type_pointer pg,value_type_pointer pe):
1029     groups_size_index{gsi},groups_size_mask{gsm},groups_{pg},elements_{pe}{}
1030 
1031   value_type* elements()const noexcept{return boost::to_address(elements_);}
1032   group_type* groups()const noexcept{return boost::to_address(groups_);}
1033 
1034   static void set_arrays(table_arrays& arrays,allocator_type al,std::size_t n)
1035   {
1036     return set_arrays(
1037       arrays,al,n,std::is_same<group_type*,group_type_pointer>{});
1038   }
1039 
1040   static void set_arrays(
1041     table_arrays& arrays,allocator_type al,std::size_t,
1042     std::false_type /* always allocate */)
1043   {
1044     using storage_traits=boost::allocator_traits<allocator_type>;
1045     auto groups_size_index=arrays.groups_size_index;
1046     auto groups_size=size_policy::size(groups_size_index);
1047 
1048     auto sal=allocator_type(al);
1049     arrays.elements_=storage_traits::allocate(sal,buffer_size(groups_size));
1050     
1051     /* Align arrays.groups to sizeof(group_type). table_iterator critically
1052      * depends on such alignment for its increment operation.
1053      */
1054 
1055     auto p=reinterpret_cast<unsigned char*>(arrays.elements()+groups_size*N-1);
1056     p+=(uintptr_t(sizeof(group_type))-
1057         reinterpret_cast<uintptr_t>(p))%sizeof(group_type);
1058     arrays.groups_=
1059       group_type_pointer_traits::pointer_to(*reinterpret_cast<group_type*>(p));
1060 
1061     initialize_groups(
1062       arrays.groups(),groups_size,
1063       is_trivially_default_constructible<group_type>{});
1064     arrays.groups()[groups_size-1].set_sentinel();
1065   }
1066 
1067   static void set_arrays(
1068     table_arrays& arrays,allocator_type al,std::size_t n,
1069     std::true_type /* optimize for n==0*/)
1070   {
1071     if(!n){
1072       arrays.groups_=dummy_groups<group_type,size_policy::min_size()>();
1073     }
1074     else{
1075       set_arrays(arrays,al,n,std::false_type{});
1076     }
1077   }
1078 
1079   static table_arrays new_(allocator_type al,std::size_t n)
1080   {
1081     auto         groups_size_index=size_index_for<group_type,size_policy>(n);
1082     auto         groups_size=size_policy::size(groups_size_index);
1083     table_arrays arrays{groups_size_index,groups_size-1,nullptr,nullptr};
1084 
1085     set_arrays(arrays,al,n);
1086     return arrays;
1087   }
1088 
1089   static void delete_(allocator_type al,table_arrays& arrays)noexcept
1090   {
1091     using storage_traits=boost::allocator_traits<allocator_type>;
1092 
1093     auto sal=allocator_type(al);
1094     if(arrays.elements()){
1095       storage_traits::deallocate(
1096         sal,arrays.elements_,buffer_size(arrays.groups_size_mask+1));
1097     }
1098   }
1099 
1100   /* combined space for elements and groups measured in sizeof(value_type)s */
1101 
1102   static std::size_t buffer_size(std::size_t groups_size)
1103   {
1104     auto buffer_bytes=
1105       /* space for elements (we subtract 1 because of the sentinel) */
1106       sizeof(value_type)*(groups_size*N-1)+
1107       /* space for groups + padding for group alignment */
1108       sizeof(group_type)*(groups_size+1)-1;
1109 
1110     /* ceil(buffer_bytes/sizeof(value_type)) */
1111     return (buffer_bytes+sizeof(value_type)-1)/sizeof(value_type);
1112   }
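
  /* Worked example (illustrative, assuming sizeof(value_type)==8 and
   * sizeof(group_type)==16): for groups_size=8, buffer_bytes is
   * 8*(8*15-1)+16*(8+1)-1 = 952+143 = 1095 bytes, so buffer_size returns
   * ceil(1095/8)=137 value_type-sized units.
   */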
1113 
1114   static void initialize_groups(
1115     group_type* pg,std::size_t size,std::true_type /* memset */)
1116   {
1117     /* memset faster/not slower than manual, assumes all zeros is group_type's
1118      * default layout.
1119      * reinterpret_cast: GCC may complain about group_type not being trivially
1120      * copy-assignable when we're relying on trivial copy constructibility.
1121      */
1122 
1123     std::memset(
1124       reinterpret_cast<unsigned char*>(pg),0,sizeof(group_type)*size);
1125   }
1126 
1127   static void initialize_groups(
1128     group_type* pg,std::size_t size,std::false_type /* manual */)
1129   {
1130     while(size--!=0)::new (pg++) group_type();
1131   }
1132 
1133   std::size_t        groups_size_index;
1134   std::size_t        groups_size_mask;
1135   group_type_pointer groups_;
1136   value_type_pointer elements_;
1137 };
1138 
1139 #if defined(BOOST_UNORDERED_ENABLE_STATS)
1140 /* stats support */
1141 
1142 struct table_core_cumulative_stats
1143 {
1144   concurrent_cumulative_stats<1> insertion;
1145   concurrent_cumulative_stats<2> successful_lookup,
1146                                  unsuccessful_lookup;
1147 };
1148 
1149 struct table_core_insertion_stats
1150 {
1151   std::size_t            count;
1152   sequence_stats_summary probe_length;
1153 };
1154 
1155 struct table_core_lookup_stats
1156 {
1157   std::size_t            count;
1158   sequence_stats_summary probe_length;
1159   sequence_stats_summary num_comparisons;
1160 };
1161 
1162 struct table_core_stats
1163 {
1164   table_core_insertion_stats insertion;
1165   table_core_lookup_stats    successful_lookup,
1166                              unsuccessful_lookup;
1167 };
1168 
1169 #define BOOST_UNORDERED_ADD_STATS(stats,args) stats.add args
1170 #define BOOST_UNORDERED_SWAP_STATS(stats1,stats2) std::swap(stats1,stats2)
1171 #define BOOST_UNORDERED_COPY_STATS(stats1,stats2) stats1=stats2
1172 #define BOOST_UNORDERED_RESET_STATS_OF(x) x.reset_stats()
1173 #define BOOST_UNORDERED_STATS_COUNTER(name) std::size_t name=0
1174 #define BOOST_UNORDERED_INCREMENT_STATS_COUNTER(name) ++name
1175 
1176 #else
1177 
1178 #define BOOST_UNORDERED_ADD_STATS(stats,args) ((void)0)
1179 #define BOOST_UNORDERED_SWAP_STATS(stats1,stats2) ((void)0)
1180 #define BOOST_UNORDERED_COPY_STATS(stats1,stats2) ((void)0)
1181 #define BOOST_UNORDERED_RESET_STATS_OF(x) ((void)0)
1182 #define BOOST_UNORDERED_STATS_COUNTER(name) ((void)0)
1183 #define BOOST_UNORDERED_INCREMENT_STATS_COUNTER(name) ((void)0)
1184 
1185 #endif
1186 
1187 struct if_constexpr_void_else{void operator()()const{}};
1188 
1189 template<bool B,typename F,typename G=if_constexpr_void_else>
1190 void if_constexpr(F f,G g={})
1191 {
1192   std::get<B?0:1>(std::forward_as_tuple(f,g))();
1193 }
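
/* Illustrative usage (not part of the original header): if_constexpr acts as
 * a pre-C++17 stand-in for if constexpr, selecting one of two callables at
 * compile time:
 *
 *   if_constexpr<B>(
 *     [&]{ // executed when B is true
 *     },
 *     [&]{ // executed when B is false
 *     });
 */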
1194 
1195 template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
1196 void copy_assign_if(T& x,const T& y){x=y;}
1197 
1198 template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
1199 void copy_assign_if(T&,const T&){}
1200 
1201 template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
1202 void move_assign_if(T& x,T& y){x=std::move(y);}
1203 
1204 template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
1205 void move_assign_if(T&,T&){}
1206 
1207 template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
1208 void swap_if(T& x,T& y){using std::swap; swap(x,y);}
1209 
1210 template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
1211 void swap_if(T&,T&){}
1212 
1213 template<typename Allocator>
1214 struct is_std_allocator:std::false_type{};
1215 
1216 template<typename T>
1217 struct is_std_allocator<std::allocator<T>>:std::true_type{};
1218 
1219 /* std::allocator::construct marked as deprecated */
1220 #if defined(_LIBCPP_SUPPRESS_DEPRECATED_PUSH)
1221 _LIBCPP_SUPPRESS_DEPRECATED_PUSH
1222 #elif defined(_STL_DISABLE_DEPRECATED_WARNING)
1223 _STL_DISABLE_DEPRECATED_WARNING
1224 #elif defined(_MSC_VER)
1225 #pragma warning(push)
1226 #pragma warning(disable:4996)
1227 #endif
1228 
1229 template<typename Allocator,typename Ptr,typename... Args>
1230 struct alloc_has_construct
1231 {
1232 private:
1233   template<typename Allocator2>
1234   static decltype(
1235     std::declval<Allocator2&>().construct(
1236       std::declval<Ptr>(),std::declval<Args&&>()...),
1237     std::true_type{}
1238   ) check(int);
1239 
1240   template<typename> static std::false_type check(...);
1241 
1242 public:
1243   static constexpr bool value=decltype(check<Allocator>(0))::value;
1244 };
1245 
1246 #if defined(_LIBCPP_SUPPRESS_DEPRECATED_POP)
1247 _LIBCPP_SUPPRESS_DEPRECATED_POP
1248 #elif defined(_STL_RESTORE_DEPRECATED_WARNING)
1249 _STL_RESTORE_DEPRECATED_WARNING
1250 #elif defined(_MSC_VER)
1251 #pragma warning(pop)
1252 #endif
1253 
1254 /* We expose the hard-coded max load factor so that tests can use it without
1255  * needing to pull it from an instantiated class template such as the table
1256  * class.
1257  */
1258 static constexpr float mlf=0.875f;
1259 
1260 template<typename Group,typename Element>
1261 struct table_locator
1262 {
1263   table_locator()=default;
1264   table_locator(Group* pg_,unsigned int n_,Element* p_):pg{pg_},n{n_},p{p_}{}
1265 
1266   explicit operator bool()const noexcept{return p!=nullptr;}
1267 
1268   Group        *pg=nullptr;
1269   unsigned int  n=0;
1270   Element      *p=nullptr;
1271 };
1272 
1273 struct try_emplace_args_t{};
1274 
1275 template<typename TypePolicy,typename Allocator,typename... Args>
1276 class alloc_cted_insert_type
1277 {
1278   using emplace_type=typename std::conditional<
1279     std::is_constructible<typename TypePolicy::init_type,Args...>::value,
1280     typename TypePolicy::init_type,
1281     typename TypePolicy::value_type
1282   >::type;
1283 
1284   using insert_type=typename std::conditional<
1285     std::is_constructible<typename TypePolicy::value_type,emplace_type>::value,
1286     emplace_type,typename TypePolicy::element_type
1287   >::type;
1288 
1289   using alloc_cted = allocator_constructed<Allocator, insert_type, TypePolicy>;
1290   alloc_cted val;
1291 
1292 public:
1293   alloc_cted_insert_type(const Allocator& al_,Args&&... args):val{al_,std::forward<Args>(args)...}
1294   {
1295   }
1296 
1297   insert_type& value(){return val.value();}
1298 };
1299 
1300 template<typename TypePolicy,typename Allocator,typename... Args>
1301 alloc_cted_insert_type<TypePolicy,Allocator,Args...>
1302 alloc_make_insert_type(const Allocator& al,Args&&... args)
1303 {
1304   return {al,std::forward<Args>(args)...};
1305 }
1306 
1307 template <typename TypePolicy, typename Allocator, typename KFwdRef,
1308   typename = void>
1309 class alloc_cted_or_fwded_key_type
1310 {
1311   using key_type = typename TypePolicy::key_type;
1312   allocator_constructed<Allocator, key_type, TypePolicy> val;
1313 
1314 public:
1315   alloc_cted_or_fwded_key_type(const Allocator& al_, KFwdRef k)
1316       : val(al_, std::forward<KFwdRef>(k))
1317   {
1318   }
1319 
1320   key_type&& move_or_fwd() { return std::move(val.value()); }
1321 };
1322 
1323 template <typename TypePolicy, typename Allocator, typename KFwdRef>
1324 class alloc_cted_or_fwded_key_type<TypePolicy, Allocator, KFwdRef,
1325   typename std::enable_if<
1326     is_similar<KFwdRef, typename TypePolicy::key_type>::value>::type>
1327 {
1328   // This specialization acts as a forwarding-reference wrapper
1329   BOOST_UNORDERED_STATIC_ASSERT(std::is_reference<KFwdRef>::value);
1330   KFwdRef ref;
1331 
1332 public:
1333   alloc_cted_or_fwded_key_type(const Allocator&, KFwdRef k)
1334       : ref(std::forward<KFwdRef>(k))
1335   {
1336   }
1337 
1338   KFwdRef move_or_fwd() { return std::forward<KFwdRef>(ref); }
1339 };
1340 
1341 template <typename Container>
1342 using is_map =
1343   std::integral_constant<bool, !std::is_same<typename Container::key_type,
1344                                  typename Container::value_type>::value>;
1345 
1346 template <typename Container, typename K>
1347 using is_emplace_kv_able = std::integral_constant<bool,
1348   is_map<Container>::value &&
1349     (is_similar<K, typename Container::key_type>::value ||
1350       is_complete_and_move_constructible<typename Container::key_type>::value)>;
1351 
1352 /* table_core. The TypePolicy template parameter is used to generate
1353  * instantiations suitable for either maps or sets, and introduces non-standard
1354  * init_type and element_type:
1355  *
1356  *   - TypePolicy::key_type and TypePolicy::value_type have the obvious
1357  *     meaning. TypePolicy::mapped_type is expected to be provided as well
1358  *     when key_type and value_type are not the same.
1359  *
1360  *   - TypePolicy::init_type is the type implicitly converted to when
1361  *     writing x.insert({...}). For maps, this is std::pair<Key,T> rather
1362  *     than std::pair<const Key,T> so that, for instance, x.insert({"hello",0})
1363  *     produces a cheaply moveable std::string&& ("hello") rather than
1364  *     a copyable const std::string&&. foa::table::insert is extended to accept
1365  *     both init_type and value_type references.
1366  *
1367  *   - TypePolicy::construct and TypePolicy::destroy are used for the
1368  *     construction and destruction of the internal types: value_type,
1369  *     init_type, element_type, and key_type.
1370  * 
1371  *   - TypePolicy::move is used to provide move semantics for the internal
1372  *     types used by the container during rehashing and emplace. These types
1373  *     are init_type, value_type and emplace_type. During insertion, a
1374  *     stack-local type will be created based on the constructibility of the
1375  *     value_type and the supplied arguments. TypePolicy::move is used here
1376  *     for transfer of ownership. Similarly, TypePolicy::move is also used
1377  *     during rehashing when elements are moved to the new table.
1378  *
1379  *   - TypePolicy::extract returns a const reference to the key part of
1380  *     a value of type value_type, init_type, element_type or
1381  *     decltype(TypePolicy::move(...)).
1382  *
1383  *   - TypePolicy::element_type is the type that table_arrays uses when
1384  *     allocating buckets, which allows us to have flat and node containers.
1385  *     For flat containers, element_type is value_type. For node
1386  *     containers, it is a strong typedef to value_type*.
1387  *
1388  *   - TypePolicy::value_from returns a mutable reference to value_type from
1389  *     a given element_type. This is used when elements of the table themselves
1390  *     need to be moved, such as during move construction/assignment when
1391  *     allocators are unequal and there is no propagation. For all other cases,
1392  *     the element_type itself is moved.
1393  */
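
/* Minimal sketch (illustrative only, not the actual policy types shipped with
 * the library) of a set-like TypePolicy satisfying the requirements above,
 * with key_type, init_type, value_type and element_type all coinciding and
 * construction/destruction delegated to std::allocator_traits:
 *
 *   template<typename Key>
 *   struct set_policy_sketch
 *   {
 *     using key_type=Key;
 *     using init_type=Key;
 *     using value_type=Key;
 *     using element_type=Key;
 *
 *     static value_type& value_from(element_type& x){return x;}
 *     static const key_type& extract(const element_type& x){return x;}
 *     static element_type&& move(element_type& x){return std::move(x);}
 *
 *     template<typename Allocator,typename... Args>
 *     static void construct(Allocator& al,element_type* p,Args&&... args)
 *     {
 *       std::allocator_traits<Allocator>::construct(
 *         al,p,std::forward<Args>(args)...);
 *     }
 *
 *     template<typename Allocator>
 *     static void destroy(Allocator& al,element_type* p)noexcept
 *     {
 *       std::allocator_traits<Allocator>::destroy(al,p);
 *     }
 *   };
 */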
1394 
1395 #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
1396 
1397 #if defined(BOOST_MSVC)
1398 #pragma warning(push)
1399 #pragma warning(disable:4714) /* marked as __forceinline not inlined */
1400 #endif
1401 
1402 #if BOOST_WORKAROUND(BOOST_MSVC,<=1900)
1403 /* VS2015 marks as unreachable generic catch clauses around non-throwing
1404  * code.
1405  */
1406 #pragma warning(push)
1407 #pragma warning(disable:4702)
1408 #endif
1409 
1410 template<
1411   typename TypePolicy,typename Group,template<typename...> class Arrays,
1412   typename SizeControl,typename Hash,typename Pred,typename Allocator
1413 >
1414 class 
1415 
1416 #if defined(_MSC_VER)&&_MSC_FULL_VER>=190023918
1417 __declspec(empty_bases) /* activate EBO with multiple inheritance */
1418 #endif
1419 
1420 table_core:empty_value<Hash,0>,empty_value<Pred,1>,empty_value<Allocator,2>
1421 {
1422 public:
1423   using type_policy=TypePolicy;
1424   using group_type=Group;
1425   static constexpr auto N=group_type::N;
1426   using size_policy=pow2_size_policy;
1427   using prober=pow2_quadratic_prober;
1428   using mix_policy=typename std::conditional<
1429     hash_is_avalanching<Hash>::value,
1430     no_mix,
1431     mulx_mix
1432   >::type;
1433   using alloc_traits=boost::allocator_traits<Allocator>;
1434   using element_type=typename type_policy::element_type;
1435   using arrays_type=Arrays<element_type,group_type,size_policy,Allocator>;
1436   using size_ctrl_type=SizeControl;
1437   static constexpr auto uses_fancy_pointers=!std::is_same<
1438     typename alloc_traits::pointer,
1439     typename alloc_traits::value_type*
1440   >::value;
1441 
1442   using key_type=typename type_policy::key_type;
1443   using init_type=typename type_policy::init_type;
1444   using value_type=typename type_policy::value_type;
1445   using hasher=Hash;
1446   using key_equal=Pred;
1447   using allocator_type=Allocator;
1448   using pointer=value_type*;
1449   using const_pointer=const value_type*;
1450   using reference=value_type&;
1451   using const_reference=const value_type&;
1452   using size_type=std::size_t;
1453   using difference_type=std::ptrdiff_t;
1454   using locator=table_locator<group_type,element_type>;
1455   using arrays_holder_type=arrays_holder<arrays_type,Allocator>;
1456 
1457 #if defined(BOOST_UNORDERED_ENABLE_STATS)
1458   using cumulative_stats=table_core_cumulative_stats;
1459   using stats=table_core_stats;
1460 #endif
1461 
1462 #if defined(BOOST_GCC)
1463 #pragma GCC diagnostic push
1464 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
1465 #endif
1466 
1467   table_core(
1468     std::size_t n=default_bucket_count,const Hash& h_=Hash(),
1469     const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
1470     hash_base{empty_init,h_},pred_base{empty_init,pred_},
1471     allocator_base{empty_init,al_},arrays(new_arrays(n)),
1472     size_ctrl{initial_max_load(),0}
1473     {}
1474 
1475 #if defined(BOOST_GCC)
1476 #pragma GCC diagnostic pop
1477 #endif
1478 
1479   /* genericize on an ArraysFn so that we can do things like delay an
1480    * allocation for the group_access data required by cfoa after the move
1481    * constructors of Hash, Pred have been invoked
1482    */
1483   template<typename ArraysFn>
1484   table_core(
1485     Hash&& h_,Pred&& pred_,Allocator&& al_,
1486     ArraysFn arrays_fn,const size_ctrl_type& size_ctrl_):
1487     hash_base{empty_init,std::move(h_)},
1488     pred_base{empty_init,std::move(pred_)},
1489     allocator_base{empty_init,std::move(al_)},
1490     arrays(arrays_fn()),size_ctrl(size_ctrl_)
1491   {}
1492 
1493   table_core(const table_core& x):
1494     table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{}
1495 
1496   template<typename ArraysFn>
1497   table_core(table_core&& x,arrays_holder_type&& ah,ArraysFn arrays_fn):
1498     table_core(
1499       std::move(x.h()),std::move(x.pred()),std::move(x.al()),
1500       arrays_fn,x.size_ctrl)
1501   {
1502     x.arrays=ah.release();
1503     x.size_ctrl.ml=x.initial_max_load();
1504     x.size_ctrl.size=0;
1505     BOOST_UNORDERED_SWAP_STATS(cstats,x.cstats);
1506   }
1507 
1508   table_core(table_core&& x)
1509     noexcept(
1510       std::is_nothrow_move_constructible<Hash>::value&&
1511       std::is_nothrow_move_constructible<Pred>::value&&
1512       std::is_nothrow_move_constructible<Allocator>::value&&
1513       !uses_fancy_pointers):
1514     table_core{
1515       std::move(x),x.make_empty_arrays(),[&x]{return x.arrays;}}
1516   {}
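  /* Net effect of the delegation above: *this adopts x.arrays through the
   * lambda, while x receives the freshly made empty arrays and a zeroed size,
   * leaving it in a valid, empty state.
   */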
1517 
1518   table_core(const table_core& x,const Allocator& al_):
1519     table_core{std::size_t(std::ceil(float(x.size())/mlf)),x.h(),x.pred(),al_}
1520   {
1521     copy_elements_from(x);
1522   }
1523 
1524   table_core(table_core&& x,const Allocator& al_):
1525     table_core{std::move(x.h()),std::move(x.pred()),al_}
1526   {
1527     if(al()==x.al()){
1528       using std::swap;
1529       swap(arrays,x.arrays);
1530       swap(size_ctrl,x.size_ctrl);
1531       BOOST_UNORDERED_SWAP_STATS(cstats,x.cstats);
1532     }
1533     else{
1534       reserve(x.size());
1535       clear_on_exit c{x};
1536       (void)c; /* unused var warning */
1537       BOOST_UNORDERED_RESET_STATS_OF(x);
1538 
1539       /* This works because subsequent x.clear() does not depend on the
1540        * elements' values.
1541        */
1542       x.for_all_elements([this](element_type* p){
1543         unchecked_insert(type_policy::move(type_policy::value_from(*p)));
1544       });
1545     }
1546   }
1547 
1548   ~table_core()noexcept
1549   {
1550     for_all_elements([this](element_type* p){
1551       destroy_element(p);
1552     });
1553     delete_arrays(arrays);
1554   }
1555 
1556   std::size_t initial_max_load()const
1557   {
1558     static constexpr std::size_t small_capacity=2*N-1;
1559 
1560     auto capacity_=capacity();
1561     if(capacity_<=small_capacity){
1562       return capacity_; /* we allow 100% usage */
1563     }
1564     else{
1565       return (std::size_t)(mlf*(float)(capacity_));
1566     }
1567   }
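  /* Illustrative numbers, assuming a 15-slot group (N==15) and the default
   * mlf of 0.875: small_capacity is 29, so one- and two-group tables may be
   * filled to 100%, whereas a 32-group table (capacity 479) caps out at 419
   * elements.
   */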
1568 
1569   arrays_holder_type make_empty_arrays()const
1570   {
1571     return make_arrays(0);
1572   }
1573 
1574   table_core& operator=(const table_core& x)
1575   {
1576     BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
1577 
1578     static constexpr auto pocca=
1579       alloc_traits::propagate_on_container_copy_assignment::value;
1580 
1581     if(this!=std::addressof(x)){
1582       /* We copy Hash and Pred into temporaries first so that, if either
1583        * copy construction throws, the container is left intact.
1584        */
1585       hasher    tmp_h=x.h();
1586       key_equal tmp_p=x.pred();
1587 
1588       clear();
1589 
1590       /* Because we've asserted at compile time that Hash and Pred are nothrow
1591        * swappable, installing the copies via swap cannot throw, so the
1592        * container's Hash and Pred members stay consistent with its contents.
1593        */
1594       using std::swap;
1595       swap(h(),tmp_h);
1596       swap(pred(),tmp_p);
1597 
1598       if_constexpr<pocca>([&,this]{
1599         if(al()!=x.al()){
1600           auto ah=x.make_arrays(std::size_t(std::ceil(float(x.size())/mlf)));
1601           delete_arrays(arrays);
1602           arrays=ah.release();
1603           size_ctrl.ml=initial_max_load();
1604         }
1605         copy_assign_if<pocca>(al(),x.al());
1606       });
1607       /* noshrink: favor memory reuse over tightness */
1608       noshrink_reserve(x.size());
1609       copy_elements_from(x);
1610     }
1611     return *this;
1612   }
1613 
1614 #if defined(BOOST_MSVC)
1615 #pragma warning(push)
1616 #pragma warning(disable:4127) /* conditional expression is constant */
1617 #endif
1618 
1619   table_core& operator=(table_core&& x)
1620     noexcept(
1621       (alloc_traits::propagate_on_container_move_assignment::value||
1622       alloc_traits::is_always_equal::value)&&!uses_fancy_pointers)
1623   {
1624     BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
1625 
1626     static constexpr auto pocma=
1627       alloc_traits::propagate_on_container_move_assignment::value;
1628 
1629     if(this!=std::addressof(x)){
1630       /* Given ambiguity in implementation strategies briefly discussed here:
1631        * https://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#2227
1632        *
1633        * we opt into requiring nothrow swappability and eschew the move
1634        * operations associated with Hash, Pred.
1635        *
1636        * To this end, we ensure that the user never has to consider the
1637        * moved-from state of their Hash and Pred objects.
1638        */
1639 
1640       using std::swap;
1641 
1642       clear();
1643 
1644       if(pocma||al()==x.al()){
1645         auto ah=x.make_empty_arrays();
1646         swap(h(),x.h());
1647         swap(pred(),x.pred());
1648         delete_arrays(arrays);
1649         move_assign_if<pocma>(al(),x.al());
1650         arrays=x.arrays;
1651         size_ctrl.ml=std::size_t(x.size_ctrl.ml);
1652         size_ctrl.size=std::size_t(x.size_ctrl.size);
1653         BOOST_UNORDERED_COPY_STATS(cstats,x.cstats);
1654         x.arrays=ah.release();
1655         x.size_ctrl.ml=x.initial_max_load();
1656         x.size_ctrl.size=0;
1657         BOOST_UNORDERED_RESET_STATS_OF(x);
1658       }
1659       else{
1660         swap(h(),x.h());
1661         swap(pred(),x.pred());
1662 
1663         /* noshrink: favor memory reuse over tightness */
1664         noshrink_reserve(x.size());
1665         clear_on_exit c{x};
1666         (void)c; /* unused var warning */
1667         BOOST_UNORDERED_RESET_STATS_OF(x);
1668 
1669         /* This works because subsequent x.clear() does not depend on the
1670          * elements' values.
1671          */
1672         x.for_all_elements([this](element_type* p){
1673           unchecked_insert(type_policy::move(type_policy::value_from(*p)));
1674         });
1675       }
1676     }
1677     return *this;
1678   }
1679 
1680 #if defined(BOOST_MSVC)
1681 #pragma warning(pop) /* C4127 */
1682 #endif
1683 
1684   allocator_type get_allocator()const noexcept{return al();}
1685 
1686   bool        empty()const noexcept{return size()==0;}
1687   std::size_t size()const noexcept{return size_ctrl.size;}
1688   std::size_t max_size()const noexcept{return SIZE_MAX;}
1689 
1690   BOOST_FORCEINLINE
1691   void erase(group_type* pg,unsigned int pos,element_type* p)noexcept
1692   {
1693     destroy_element(p);
1694     recover_slot(pg,pos);
1695   }
1696 
1697   BOOST_FORCEINLINE
1698   void erase(unsigned char* pc,element_type* p)noexcept
1699   {
1700     destroy_element(p);
1701     recover_slot(pc);
1702   }
1703 
1704   template<typename Key>
1705   BOOST_FORCEINLINE locator find(const Key& x)const
1706   {
1707     auto hash=hash_for(x);
1708     return find(x,position_for(hash),hash);
1709   }
1710 
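  /* Usage sketch (hypothetical caller code, not part of this header); the
   * hash can be computed once and reused, as the insertion paths do:
   *
   *   auto hash=tc.hash_for(k);
   *   auto loc=tc.find(k,tc.position_for(hash),hash);
   *   if(loc){ use(*loc.p); }  // loc.p points at the found element
   */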
1711 #if defined(BOOST_MSVC)
1712 /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */
1713 #pragma warning(push)
1714 #pragma warning(disable:4800)
1715 #endif
1716 
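  /* The overload below, in outline: probe groups quadratically starting at
   * pos0; in each group, match(hash) yields a bit mask of slots whose reduced
   * hash matches, and only those elements are compared through pred(); the
   * search stops at the first group whose overflow byte indicates that no
   * colliding element was ever displaced past it (is_not_overflowed).
   */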
1717   template<typename Key>
1718   BOOST_FORCEINLINE locator find(
1719     const Key& x,std::size_t pos0,std::size_t hash)const
1720   {    
1721     BOOST_UNORDERED_STATS_COUNTER(num_cmps);
1722     prober pb(pos0);
1723     do{
1724       auto pos=pb.get();
1725       auto pg=arrays.groups()+pos;
1726       auto mask=pg->match(hash);
1727       if(mask){
1728         auto elements=arrays.elements();
1729         BOOST_UNORDERED_ASSUME(elements!=nullptr);
1730         auto p=elements+pos*N;
1731         BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
1732         do{
1733           BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
1734           auto n=unchecked_countr_zero(mask);
1735           if(BOOST_LIKELY(bool(pred()(x,key_from(p[n]))))){
1736             BOOST_UNORDERED_ADD_STATS(
1737               cstats.successful_lookup,(pb.length(),num_cmps));
1738             return {pg,n,p+n};
1739           }
1740           mask&=mask-1;
1741         }while(mask);
1742       }
1743       if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
1744         BOOST_UNORDERED_ADD_STATS(
1745           cstats.unsuccessful_lookup,(pb.length(),num_cmps));
1746         return {};
1747       }
1748     }
1749     while(BOOST_LIKELY(pb.next(arrays.groups_size_mask)));
1750     BOOST_UNORDERED_ADD_STATS(
1751       cstats.unsuccessful_lookup,(pb.length(),num_cmps));
1752     return {};
1753   }
1754 
1755 #if defined(BOOST_MSVC)
1756 #pragma warning(pop) /* C4800 */
1757 #endif
1758 
1759   void swap(table_core& x)
1760     noexcept(
1761       alloc_traits::propagate_on_container_swap::value||
1762       alloc_traits::is_always_equal::value)
1763   {
1764     BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
1765 
1766     static constexpr auto pocs=
1767       alloc_traits::propagate_on_container_swap::value;
1768 
1769     using std::swap;
1770     if_constexpr<pocs>([&,this]{
1771       swap_if<pocs>(al(),x.al());
1772     },
1773     [&,this]{ /* else */
1774       BOOST_ASSERT(al()==x.al());
1775       (void)this; /* makes sure captured this is used */
1776     });
1777 
1778     swap(h(),x.h());
1779     swap(pred(),x.pred());
1780     swap(arrays,x.arrays);
1781     swap(size_ctrl,x.size_ctrl);
1782   }
1783 
1784   void clear()noexcept
1785   {
1786     auto p=arrays.elements();
1787     if(p){
1788       for(auto pg=arrays.groups(),last=pg+arrays.groups_size_mask+1;
1789           pg!=last;++pg,p+=N){
1790         auto mask=match_really_occupied(pg,last);
1791         while(mask){
1792           destroy_element(p+unchecked_countr_zero(mask));
1793           mask&=mask-1;
1794         }
1795         /* we wipe the entire metadata to reset the overflow byte as well */
1796         pg->initialize();
1797       }
1798       arrays.groups()[arrays.groups_size_mask].set_sentinel();
1799       size_ctrl.ml=initial_max_load();
1800       size_ctrl.size=0;
1801     }
1802   }
1803 
1804   hasher hash_function()const{return h();}
1805   key_equal key_eq()const{return pred();}
1806 
1807   std::size_t capacity()const noexcept
1808   {
1809     return arrays.elements()?(arrays.groups_size_mask+1)*N-1:0;
1810   }
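  /* The -1 accounts for the slot of the last group reserved for the sentinel
   * (set via set_sentinel() in clear() and excluded by
   * match_really_occupied()).
   */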
1811   
1812   float load_factor()const noexcept
1813   {
1814     if(capacity()==0)return 0;
1815     else             return float(size())/float(capacity());
1816   }
1817 
1818   float max_load_factor()const noexcept{return mlf;}
1819 
1820   std::size_t max_load()const noexcept{return size_ctrl.ml;}
1821 
1822   void rehash(std::size_t n)
1823   {
1824     auto m=size_t(std::ceil(float(size())/mlf));
1825     if(m>n)n=m;
1826     if(n)n=capacity_for(n); /* exact resulting capacity */
1827 
1828     if(n!=capacity())unchecked_rehash(n);
1829   }
1830 
1831   void reserve(std::size_t n)
1832   {
1833     rehash(std::size_t(std::ceil(float(n)/mlf)));
1834   }
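  /* Worked example (assuming the default mlf of 0.875): reserve(1000) asks
   * rehash() for ceil(1000/0.875)==1143 slots, which rehash() then rounds up
   * through capacity_for() to the next attainable capacity of the form
   * (2^k)*N-1.
   */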
1835 
1836 #if defined(BOOST_UNORDERED_ENABLE_STATS)
1837   stats get_stats()const
1838   {
1839     auto insertion=cstats.insertion.get_summary();
1840     auto successful_lookup=cstats.successful_lookup.get_summary();
1841     auto unsuccessful_lookup=cstats.unsuccessful_lookup.get_summary();
1842     return{
1843       {
1844         insertion.count,
1845         insertion.sequence_summary[0]
1846       },
1847       {
1848         successful_lookup.count,
1849         successful_lookup.sequence_summary[0],
1850         successful_lookup.sequence_summary[1]
1851       },
1852       {
1853         unsuccessful_lookup.count,
1854         unsuccessful_lookup.sequence_summary[0],
1855         unsuccessful_lookup.sequence_summary[1]
1856       },
1857     };
1858   }
1859 
1860   void reset_stats()noexcept
1861   {
1862     cstats.insertion.reset();
1863     cstats.successful_lookup.reset();
1864     cstats.unsuccessful_lookup.reset();
1865   }
1866 #endif
1867 
1868   friend bool operator==(const table_core& x,const table_core& y)
1869   {
1870     return
1871       x.size()==y.size()&&
1872       x.for_all_elements_while([&](element_type* p){
1873         auto loc=y.find(key_from(*p));
1874         return loc&&
1875           const_cast<const value_type&>(type_policy::value_from(*p))==
1876           const_cast<const value_type&>(type_policy::value_from(*loc.p));
1877       });
1878   }
1879 
1880   friend bool operator!=(const table_core& x,const table_core& y)
1881   {
1882     return !(x==y);
1883   }
1884 
1885   struct clear_on_exit
1886   {
1887     ~clear_on_exit(){x.clear();}
1888     table_core& x;
1889   };
1890 
1891   Hash&            h(){return hash_base::get();}
1892   const Hash&      h()const{return hash_base::get();}
1893   Pred&            pred(){return pred_base::get();}
1894   const Pred&      pred()const{return pred_base::get();}
1895   Allocator&       al(){return allocator_base::get();}
1896   const Allocator& al()const{return allocator_base::get();}
1897 
1898   template<typename... Args>
1899   void construct_element(element_type* p,Args&&... args)
1900   {
1901     type_policy::construct(al(),p,std::forward<Args>(args)...);
1902   }
1903 
1904   template<typename... Args>
1905   void construct_element(element_type* p,try_emplace_args_t,Args&&... args)
1906   {
1907     construct_element_from_try_emplace_args(
1908       p,
1909       std::integral_constant<bool,std::is_same<key_type,value_type>::value>{},
1910       std::forward<Args>(args)...);
1911   }
1912 
1913   void destroy_element(element_type* p)noexcept
1914   {
1915     type_policy::destroy(al(),p);
1916   }
1917 
1918   struct destroy_element_on_exit
1919   {
1920     ~destroy_element_on_exit(){this_->destroy_element(p);}
1921     table_core   *this_;
1922     element_type *p;
1923   };
1924 
1925   template<typename T>
1926   static inline auto key_from(const T& x)
1927     ->decltype(type_policy::extract(x))
1928   {
1929     return type_policy::extract(x);
1930   }
1931 
1932   template<typename Key,typename... Args>
1933   static inline const Key& key_from(
1934     try_emplace_args_t,const Key& x,const Args&...)
1935   {
1936     return x;
1937   }
1938 
1939   template<typename Key>
1940   inline std::size_t hash_for(const Key& x)const
1941   {
1942     return mix_policy::mix(h(),x);
1943   }
1944 
1945   inline std::size_t position_for(std::size_t hash)const
1946   {
1947     return position_for(hash,arrays);
1948   }
1949 
1950   static inline std::size_t position_for(
1951     std::size_t hash,const arrays_type& arrays_)
1952   {
1953     return size_policy::position(hash,arrays_.groups_size_index);
1954   }
1955 
1956   static inline int match_really_occupied(group_type* pg,group_type* last)
1957   {
1958     /* excluding the sentinel */
1959     return pg->match_occupied()&~(int(pg==last-1)<<(N-1));
1960   }
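  /* Bit trick above: int(pg==last-1)<<(N-1) is nonzero only for the last
   * group, so the sentinel slot (bit N-1 there) is cleared from the occupied
   * mask; for every other group the mask is returned unchanged.
   */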
1961 
1962   template<typename... Args>
1963   locator unchecked_emplace_at(
1964     std::size_t pos0,std::size_t hash,Args&&... args)
1965   {
1966     auto res=nosize_unchecked_emplace_at(
1967       arrays,pos0,hash,std::forward<Args>(args)...);
1968     ++size_ctrl.size;
1969     return res;
1970   }
1971 
1972   BOOST_NOINLINE void unchecked_rehash_for_growth()
1973   {
1974     auto new_arrays_=new_arrays_for_growth();
1975     unchecked_rehash(new_arrays_);
1976   }
1977 
1978   template<typename... Args>
1979   BOOST_NOINLINE locator
1980   unchecked_emplace_with_rehash(std::size_t hash,Args&&... args)
1981   {
1982     auto    new_arrays_=new_arrays_for_growth();
1983     locator it;
1984     BOOST_TRY{
1985       /* strong exception guarantee -> try insertion before rehash */
1986       it=nosize_unchecked_emplace_at(
1987         new_arrays_,position_for(hash,new_arrays_),
1988         hash,std::forward<Args>(args)...);
1989     }
1990     BOOST_CATCH(...){
1991       delete_arrays(new_arrays_);
1992       BOOST_RETHROW
1993     }
1994     BOOST_CATCH_END
1995 
1996     /* new_arrays_ lifetime taken care of by unchecked_rehash */
1997     unchecked_rehash(new_arrays_);
1998     ++size_ctrl.size;
1999     return it;
2000   }
2001 
2002   void noshrink_reserve(std::size_t n)
2003   {
2004     /* used only on assignment after element clearance */
2005     BOOST_ASSERT(empty());
2006 
2007     if(n){
2008       n=std::size_t(std::ceil(float(n)/mlf)); /* elements -> slots */
2009       n=capacity_for(n); /* exact resulting capacity */
2010 
2011       if(n>capacity()){
2012         auto new_arrays_=new_arrays(n);
2013         delete_arrays(arrays);
2014         arrays=new_arrays_;
2015         size_ctrl.ml=initial_max_load();
2016       }
2017     }
2018   }
2019 
2020   template<typename F>
2021   void for_all_elements(F f)const
2022   {
2023     for_all_elements(arrays,f);
2024   }
2025 
2026   template<typename F>
2027   static auto for_all_elements(const arrays_type& arrays_,F f)
2028     ->decltype(f(nullptr),void())
2029   {
2030     for_all_elements_while(arrays_,[&](element_type* p){f(p);return true;});
2031   }
2032 
2033   template<typename F>
2034   static auto for_all_elements(const arrays_type& arrays_,F f)
2035     ->decltype(f(nullptr,0,nullptr),void())
2036   {
2037     for_all_elements_while(
2038       arrays_,[&](group_type* pg,unsigned int n,element_type* p)
2039         {f(pg,n,p);return true;});
2040   }
2041 
2042   template<typename F>
2043   bool for_all_elements_while(F f)const
2044   {
2045     return for_all_elements_while(arrays,f);
2046   }
2047 
2048   template<typename F>
2049   static auto for_all_elements_while(const arrays_type& arrays_,F f)
2050     ->decltype(f(nullptr),bool())
2051   {
2052     return for_all_elements_while(
2053       arrays_,[&](group_type*,unsigned int,element_type* p){return f(p);});
2054   }
2055 
2056   template<typename F>
2057   static auto for_all_elements_while(const arrays_type& arrays_,F f)
2058     ->decltype(f(nullptr,0,nullptr),bool())
2059   {
2060     auto p=arrays_.elements();
2061     if(p){
2062       for(auto pg=arrays_.groups(),last=pg+arrays_.groups_size_mask+1;
2063           pg!=last;++pg,p+=N){
2064         auto mask=match_really_occupied(pg,last);
2065         while(mask){
2066           auto n=unchecked_countr_zero(mask);
2067           if(!f(pg,n,p+n))return false;
2068           mask&=mask-1;
2069         }
2070       }
2071     }
2072     return true;
2073   }
2074 
2075   arrays_type              arrays;
2076   size_ctrl_type           size_ctrl;
2077 
2078 #if defined(BOOST_UNORDERED_ENABLE_STATS)
2079   mutable cumulative_stats cstats;
2080 #endif
2081 
2082 private:
2083   template<
2084     typename,typename,template<typename...> class,
2085     typename,typename,typename,typename
2086   >
2087   friend class table_core;
2088 
2089   using hash_base=empty_value<Hash,0>;
2090   using pred_base=empty_value<Pred,1>;
2091   using allocator_base=empty_value<Allocator,2>;
2092 
2093 #if defined(BOOST_GCC)
2094 #pragma GCC diagnostic push
2095 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
2096 #endif
2097 
2098   /* used by allocator-extended move ctor */
2099 
2100   table_core(Hash&& h_,Pred&& pred_,const Allocator& al_):
2101     hash_base{empty_init,std::move(h_)},
2102     pred_base{empty_init,std::move(pred_)},
2103     allocator_base{empty_init,al_},arrays(new_arrays(0)),
2104     size_ctrl{initial_max_load(),0}
2105   {
2106   }
2107 
2108 #if defined(BOOST_GCC)
2109 #pragma GCC diagnostic pop
2110 #endif
2111 
2112   arrays_type new_arrays(std::size_t n)const
2113   {
2114     return arrays_type::new_(typename arrays_type::allocator_type(al()),n);
2115   }
2116 
2117   arrays_type new_arrays_for_growth()const
2118   {
2119     /* Due to the anti-drift mechanism (see recover_slot), the new arrays may
2120      * be of the same size as the old arrays; in the limit, erasing one
2121      * element at full load and then inserting could bring us back to the same
2122      * capacity after a costly rehash. To avoid this, we jump to the next
2123      * capacity level when the number of erased elements is <= 10% of total
2124      * elements at full load, which is implemented by requesting additional
2125      * F*size elements, with F = P * 10% / (1 - P * 10%), where P is the
2126      * probability of an element having caused overflow; P has been measured as
2127      * ~0.162 under ideal conditions, yielding F ~ 0.0165 ~ 1/61.
2128      */
2129     return new_arrays(std::size_t(
2130       std::ceil(static_cast<float>(size()+size()/61+1)/mlf)));
2131   }
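  /* Illustrative arithmetic for the comment above: P*10% ~ 0.0162 and
   * F = 0.0162/(1-0.0162) ~ 0.0165 ~ 1/61. For size()==1'000'000 the request
   * becomes (1'000'000+16'393+1)/mlf slots, i.e. about 1.65% of headroom over
   * a plain size()/mlf request.
   */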
2132 
2133   void delete_arrays(arrays_type& arrays_)noexcept
2134   {
2135     arrays_type::delete_(typename arrays_type::allocator_type(al()),arrays_);
2136   }
2137 
2138   arrays_holder_type make_arrays(std::size_t n)const
2139   {
2140     return {new_arrays(n),al()};
2141   }
2142 
2143   template<typename Key,typename... Args>
2144   void construct_element_from_try_emplace_args(
2145     element_type* p,std::false_type,Key&& x,Args&&... args)
2146   {
2147     type_policy::construct(
2148       this->al(),p,
2149       std::piecewise_construct,
2150       std::forward_as_tuple(std::forward<Key>(x)),
2151       std::forward_as_tuple(std::forward<Args>(args)...));
2152   }
2153 
2154   /* This overload allows boost::unordered_flat_set to internally use
2155    * try_emplace to implement heterogeneous insert (P2363).
2156    */
2157 
2158   template<typename Key>
2159   void construct_element_from_try_emplace_args(
2160     element_type* p,std::true_type,Key&& x)
2161   {
2162     type_policy::construct(this->al(),p,std::forward<Key>(x));
2163   }
2164 
2165   void copy_elements_from(const table_core& x)
2166   {
2167     BOOST_ASSERT(empty());
2168     BOOST_ASSERT(this!=std::addressof(x));
2169     if(arrays.groups_size_mask==x.arrays.groups_size_mask){
2170       fast_copy_elements_from(x);
2171     }
2172     else{
2173       x.for_all_elements([this](const element_type* p){
2174         unchecked_insert(*p);
2175       });
2176     }
2177   }
2178 
2179   void fast_copy_elements_from(const table_core& x)
2180   {
2181     if(arrays.elements()&&x.arrays.elements()){
2182       copy_elements_array_from(x);
2183       copy_groups_array_from(x);
2184       size_ctrl.ml=std::size_t(x.size_ctrl.ml);
2185       size_ctrl.size=std::size_t(x.size_ctrl.size);
2186     }
2187   }
2188 
2189   void copy_elements_array_from(const table_core& x)
2190   {
2191     copy_elements_array_from(
2192       x,
2193       std::integral_constant<
2194         bool,
2195         is_trivially_copy_constructible<element_type>::value&&(
2196           is_std_allocator<Allocator>::value||
2197           !alloc_has_construct<Allocator,value_type*,const value_type&>::value)
2198       >{}
2199     );
2200   }
2201 
2202   void copy_elements_array_from(
2203     const table_core& x,std::true_type /* -> memcpy */)
2204   {
2205     /* reinterpret_cast: GCC may complain about value_type not being trivially
2206      * copy-assignable when we're relying on trivial copy constructibility.
2207      */
2208     std::memcpy(
2209       reinterpret_cast<unsigned char*>(arrays.elements()),
2210       reinterpret_cast<unsigned char*>(x.arrays.elements()),
2211       x.capacity()*sizeof(value_type));
2212   }
2213 
2214   void copy_elements_array_from(
2215     const table_core& x,std::false_type /* -> manual */)
2216   {
2217     std::size_t num_constructed=0;
2218     BOOST_TRY{
2219       x.for_all_elements([&,this](const element_type* p){
2220         construct_element(arrays.elements()+(p-x.arrays.elements()),*p);
2221         ++num_constructed;
2222       });
2223     }
2224     BOOST_CATCH(...){
2225       if(num_constructed){
2226         x.for_all_elements_while([&,this](const element_type* p){
2227           destroy_element(arrays.elements()+(p-x.arrays.elements()));
2228           return --num_constructed!=0;
2229         });
2230       }
2231       BOOST_RETHROW
2232     }
2233     BOOST_CATCH_END
2234   }
2235 
2236   void copy_groups_array_from(const table_core& x) {
2237     copy_groups_array_from(x,is_trivially_copy_assignable<group_type>{});
2238   }
2239 
2240   void copy_groups_array_from(
2241     const table_core& x, std::true_type /* -> memcpy */)
2242   {
2243     std::memcpy(
2244       arrays.groups(),x.arrays.groups(),
2245       (arrays.groups_size_mask+1)*sizeof(group_type));
2246   }
2247 
2248   void copy_groups_array_from(
2249     const table_core& x, std::false_type /* -> manual */) 
2250   {
2251     auto pg=arrays.groups();
2252     auto xpg=x.arrays.groups();
2253     for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
2254       pg[i]=xpg[i];
2255     }
2256   }
2257 
2258   void recover_slot(unsigned char* pc)
2259   {
2260     /* If this slot potentially caused overflow, we decrease the maximum load
2261      * so that average probe length won't increase unboundedly in repeated
2262      * insert/erase cycles (drift).
2263      */
2264     size_ctrl.ml-=group_type::maybe_caused_overflow(pc);
2265     group_type::reset(pc);
2266     --size_ctrl.size;
2267   }
2268 
2269   void recover_slot(group_type* pg,std::size_t pos)
2270   {
2271     recover_slot(reinterpret_cast<unsigned char*>(pg)+pos);
2272   }
2273 
2274   static std::size_t capacity_for(std::size_t n)
2275   {
2276     return size_policy::size(size_index_for<group_type,size_policy>(n))*N-1;
2277   }
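  /* With the pow2_size_policy declared at the top of the class, attainable
   * capacities have the form (2^k)*N-1; capacity_for(n) maps n to such a
   * value via size_index_for.
   */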
2278 
2279   BOOST_NOINLINE void unchecked_rehash(std::size_t n)
2280   {
2281     auto new_arrays_=new_arrays(n);
2282     unchecked_rehash(new_arrays_);
2283   }
2284 
2285   BOOST_NOINLINE void unchecked_rehash(arrays_type& new_arrays_)
2286   {
2287     std::size_t num_destroyed=0;
2288     BOOST_TRY{
2289       for_all_elements([&,this](element_type* p){
2290         nosize_transfer_element(p,new_arrays_,num_destroyed);
2291       });
2292     }
2293     BOOST_CATCH(...){
2294       if(num_destroyed){
2295         for_all_elements_while(
2296           [&,this](group_type* pg,unsigned int n,element_type*){
2297             recover_slot(pg,n);
2298             return --num_destroyed!=0;
2299           }
2300         );
2301       }
2302       for_all_elements(new_arrays_,[this](element_type* p){
2303         destroy_element(p);
2304       });
2305       delete_arrays(new_arrays_);
2306       BOOST_RETHROW
2307     }
2308     BOOST_CATCH_END
2309 
2310     /* either all moved and destroyed or all copied */
2311     BOOST_ASSERT(num_destroyed==size()||num_destroyed==0);
2312     if(num_destroyed!=size()){
2313       for_all_elements([this](element_type* p){
2314         destroy_element(p);
2315       });
2316     }
2317     delete_arrays(arrays);
2318     arrays=new_arrays_;
2319     size_ctrl.ml=initial_max_load();
2320   }
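  /* Transfer protocol used above: nosize_transfer_element applies
   * move_if_noexcept-like semantics; the move path destroys each source
   * element as it goes (tracked by num_destroyed), the copy path leaves
   * sources intact until the end. On exception, slots already vacated in
   * *this are recovered and the partially populated new arrays are destroyed;
   * on success, any surviving source elements (copy path) are destroyed
   * before the old arrays are replaced.
   */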
2321 
2322   template<typename Value>
2323   void unchecked_insert(Value&& x)
2324   {
2325     auto hash=hash_for(key_from(x));
2326     unchecked_emplace_at(position_for(hash),hash,std::forward<Value>(x));
2327   }
2328 
2329   void nosize_transfer_element(
2330     element_type* p,const arrays_type& arrays_,std::size_t& num_destroyed)
2331   {
2332     nosize_transfer_element(
2333       p,hash_for(key_from(*p)),arrays_,num_destroyed,
2334       std::integral_constant< /* std::move_if_noexcept semantics */
2335         bool,
2336         std::is_nothrow_move_constructible<init_type>::value||
2337         !std::is_same<element_type,value_type>::value||
2338         !std::is_copy_constructible<element_type>::value>{});
2339   }
2340 
2341   void nosize_transfer_element(
2342     element_type* p,std::size_t hash,const arrays_type& arrays_,
2343     std::size_t& num_destroyed,std::true_type /* ->move */)
2344   {
2345     /* Destroy p even if an exception is thrown in the middle of move
2346      * construction, which could leave the source half-moved.
2347      */
2348     ++num_destroyed;
2349     destroy_element_on_exit d{this,p};
2350     (void)d; /* unused var warning */
2351     nosize_unchecked_emplace_at(
2352       arrays_,position_for(hash,arrays_),hash,type_policy::move(*p));
2353   }
2354 
2355   void nosize_transfer_element(
2356     element_type* p,std::size_t hash,const arrays_type& arrays_,
2357     std::size_t& /*num_destroyed*/,std::false_type /* ->copy */)
2358   {
2359     nosize_unchecked_emplace_at(
2360       arrays_,position_for(hash,arrays_),hash,
2361       const_cast<const element_type&>(*p));
2362   }
2363 
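  /* Insertion counterpart of the lookup loop in find(): probe for the first
   * group with an available slot and construct the element there; every full
   * group passed over gets mark_overflow(hash), which is what later lets
   * find() stop early at groups whose overflow byte is clean.
   */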
2364   template<typename... Args>
2365   locator nosize_unchecked_emplace_at(
2366     const arrays_type& arrays_,std::size_t pos0,std::size_t hash,
2367     Args&&... args)
2368   {
2369     for(prober pb(pos0);;pb.next(arrays_.groups_size_mask)){
2370       auto pos=pb.get();
2371       auto pg=arrays_.groups()+pos;
2372       auto mask=pg->match_available();
2373       if(BOOST_LIKELY(mask!=0)){
2374         auto n=unchecked_countr_zero(mask);
2375         auto p=arrays_.elements()+pos*N+n;
2376         construct_element(p,std::forward<Args>(args)...);
2377         pg->set(n,hash);
2378         BOOST_UNORDERED_ADD_STATS(cstats.insertion,(pb.length()));
2379         return {pg,n,p};
2380       }
2381       else pg->mark_overflow(hash);
2382     }
2383   }
2384 };
2385 
2386 #if BOOST_WORKAROUND(BOOST_MSVC,<=1900)
2387 #pragma warning(pop) /* C4702 */
2388 #endif
2389 
2390 #if defined(BOOST_MSVC)
2391 #pragma warning(pop) /* C4714 */
2392 #endif
2393 
2394 #include <boost/unordered/detail/foa/restore_wshadow.hpp>
2395 
2396 } /* namespace foa */
2397 } /* namespace detail */
2398 } /* namespace unordered */
2399 } /* namespace boost */
2400 
2401 #undef BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED
2402 #undef BOOST_UNORDERED_HAS_FEATURE
2403 #undef BOOST_UNORDERED_HAS_BUILTIN
2404 #endif