0001 /* Fast open-addressing concurrent hash table.
0002  *
0003  * Copyright 2023 Joaquin M Lopez Munoz.
0004  * Distributed under the Boost Software License, Version 1.0.
0005  * (See accompanying file LICENSE_1_0.txt or copy at
0006  * http://www.boost.org/LICENSE_1_0.txt)
0007  *
0008  * See https://www.boost.org/libs/unordered for library home page.
0009  */
0010 
0011 #ifndef BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
0012 #define BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
0013 
0014 #include <atomic>
0015 #include <boost/assert.hpp>
0016 #include <boost/config.hpp>
0017 #include <boost/core/ignore_unused.hpp>
0018 #include <boost/core/no_exceptions_support.hpp>
0019 #include <boost/core/serialization.hpp>
0020 #include <boost/cstdint.hpp>
0021 #include <boost/mp11/tuple.hpp>
0022 #include <boost/throw_exception.hpp>
0023 #include <boost/unordered/detail/archive_constructed.hpp>
0024 #include <boost/unordered/detail/bad_archive_exception.hpp>
0025 #include <boost/unordered/detail/foa/core.hpp>
0026 #include <boost/unordered/detail/foa/reentrancy_check.hpp>
0027 #include <boost/unordered/detail/foa/rw_spinlock.hpp>
0028 #include <boost/unordered/detail/foa/tuple_rotate_right.hpp>
0029 #include <boost/unordered/detail/serialization_version.hpp>
0030 #include <boost/unordered/detail/static_assert.hpp>
0031 #include <cstddef>
0032 #include <functional>
0033 #include <initializer_list>
0034 #include <iterator>
0035 #include <memory>
0036 #include <new>
0037 #include <tuple>
0038 #include <type_traits>
0039 #include <utility>
0040 
0041 #if !defined(BOOST_UNORDERED_DISABLE_PARALLEL_ALGORITHMS)
0042 #if defined(BOOST_UNORDERED_ENABLE_PARALLEL_ALGORITHMS)|| \
0043     !defined(BOOST_NO_CXX17_HDR_EXECUTION)
0044 #define BOOST_UNORDERED_PARALLEL_ALGORITHMS
0045 #endif
0046 #endif
0047 
0048 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0049 #include <algorithm>
0050 #include <execution>
0051 #endif
0052 
0053 namespace boost{
0054 namespace unordered{
0055 namespace detail{
0056 
0057 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0058 
0059 template<typename ExecutionPolicy>
0060 using is_execution_policy=std::is_execution_policy<
0061   typename std::remove_cv<
0062     typename std::remove_reference<ExecutionPolicy>::type
0063   >::type
0064 >;
0065 
0066 #else
0067 
0068 template<typename ExecutionPolicy>
0069 using is_execution_policy=std::false_type;
0070 
0071 #endif
0072 
0073 namespace foa{
0074 
0075 static constexpr std::size_t cacheline_size=64;
0076 
0077 template<typename T,std::size_t N>
0078 class cache_aligned_array
0079 {
0080 public:
0081   cache_aligned_array(){for(std::size_t n=0;n<N;)::new (data(n++)) T();}
0082   ~cache_aligned_array(){for(auto n=N;n>0;)data(--n)->~T();}
0083   cache_aligned_array(const cache_aligned_array&)=delete;
0084   cache_aligned_array& operator=(const cache_aligned_array&)=delete;
0085 
0086   T& operator[](std::size_t pos)noexcept{return *data(pos);}
0087 
0088 private:
0089   static constexpr std::size_t element_offset=
0090     (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size;
0091 
0092   BOOST_UNORDERED_STATIC_ASSERT(alignof(T)<=cacheline_size);
0093 
0094   T* data(std::size_t pos)noexcept
0095   {
0096     return reinterpret_cast<T*>(
0097       (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/
0098         cacheline_size*cacheline_size
0099       +pos*element_offset);
0100   }
0101 
0102   unsigned char buf[element_offset*N+cacheline_size-1];
0103 };
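
/* Illustrative arithmetic (not part of the code): with cacheline_size==64,
 * a T with sizeof(T)==24 gets element_offset==(24+63)/64*64==64, so
 * consecutive elements start on distinct cache lines; buf reserves
 * cacheline_size-1 extra bytes so that the first element can be aligned up.
 */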
0104 
0105 template<typename Mutex,std::size_t N>
0106 class multimutex
0107 {
0108 public:
0109   constexpr std::size_t size()const noexcept{return N;}
0110 
0111   Mutex& operator[](std::size_t pos)noexcept
0112   {
0113     BOOST_ASSERT(pos<N);
0114     return mutexes[pos];
0115   }
0116 
0117   void lock()noexcept{for(std::size_t n=0;n<N;)mutexes[n++].lock();}
0118   void unlock()noexcept{for(auto n=N;n>0;)mutexes[--n].unlock();}
0119 
0120 private:
0121   cache_aligned_array<Mutex,N> mutexes;
0122 };
0123 
0124 /* std::shared_lock is C++14 */
0125 
0126 template<typename Mutex>
0127 class shared_lock
0128 {
0129 public:
0130   shared_lock(Mutex& m_)noexcept:m(m_){m.lock_shared();}
0131   ~shared_lock()noexcept{if(owns)m.unlock_shared();}
0132 
0133   /* not used but VS in pre-C++17 mode needs to see it for RVO */
0134   shared_lock(const shared_lock&);
0135 
0136   void lock(){BOOST_ASSERT(!owns);m.lock_shared();owns=true;}
0137   void unlock(){BOOST_ASSERT(owns);m.unlock_shared();owns=false;}
0138 
0139 private:
0140   Mutex &m;
0141   bool owns=true;
0142 };
0143 
0144 /* VS in pre-C++17 mode can't implement RVO for std::lock_guard due to
0145  * its copy constructor being deleted.
0146  */
0147 
0148 template<typename Mutex>
0149 class lock_guard
0150 {
0151 public:
0152   lock_guard(Mutex& m_)noexcept:m(m_){m.lock();}
0153   ~lock_guard()noexcept{m.unlock();}
0154 
0155   /* not used but VS in pre-C++17 mode needs to see it for RVO */
0156   lock_guard(const lock_guard&);
0157 
0158 private:
0159   Mutex &m;
0160 };
0161 
0162 /* inspired by boost/multi_index/detail/scoped_bilock.hpp */
0163 
0164 template<typename Mutex>
0165 class scoped_bilock
0166 {
0167 public:
0168   scoped_bilock(Mutex& m1,Mutex& m2)noexcept
0169   {
0170     bool mutex_lt=std::less<Mutex*>{}(&m1,&m2);
0171 
0172     pm1=mutex_lt?&m1:&m2;
0173     pm1->lock();
0174     if(&m1==&m2){
0175       pm2=nullptr;
0176     }
0177     else{
0178       pm2=mutex_lt?&m2:&m1;
0179       pm2->lock();
0180     }
0181   }
0182 
0183   /* not used but VS in pre-C++17 mode needs to see it for RVO */
0184   scoped_bilock(const scoped_bilock&);
0185 
0186   ~scoped_bilock()noexcept
0187   {
0188     if(pm2)pm2->unlock();
0189     pm1->unlock();
0190   }
0191 
0192 private:
0193   Mutex *pm1,*pm2;
0194 };
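
/* Note: both mutexes are acquired in a single global order (by address,
 * via std::less, which provides a total order even for unrelated pointers),
 * so two threads bilocking the same pair in opposite argument order cannot
 * deadlock.
 */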
0195 
0196 /* use atomics for group metadata storage */
0197 
0198 template<typename Integral>
0199 struct atomic_integral
0200 {
0201   operator Integral()const{return n.load(std::memory_order_relaxed);}
0202   void operator=(Integral m){n.store(m,std::memory_order_relaxed);}
0203   void operator|=(Integral m){n.fetch_or(m,std::memory_order_relaxed);}
0204   void operator&=(Integral m){n.fetch_and(m,std::memory_order_relaxed);}
0205 
0206   atomic_integral& operator=(atomic_integral const& rhs) {
0207     n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
0208     return *this;
0209   }
0210 
0211   std::atomic<Integral> n;
0212 };
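
/* Relaxed ordering suffices for these metadata accesses: a matching
 * reduced hash value read lock-free is only a hint, and is double-checked
 * for slot occupancy under the group lock before the element is touched
 * (see unprotected_internal_visit below).
 */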
0213 
0214 /* Group-level concurrency protection. It provides a rw mutex plus an
0215  * atomic insertion counter for optimistic insertion (see
0216  * unprotected_norehash_emplace_or_visit).
0217  */
0218 
0219 struct group_access
0220 {
0221   using mutex_type=rw_spinlock;
0222   using shared_lock_guard=shared_lock<mutex_type>;
0223   using exclusive_lock_guard=lock_guard<mutex_type>;
0224   using insert_counter_type=std::atomic<boost::uint32_t>;
0225 
0226   shared_lock_guard    shared_access(){return shared_lock_guard{m};}
0227   exclusive_lock_guard exclusive_access(){return exclusive_lock_guard{m};}
0228   insert_counter_type& insert_counter(){return cnt;}
0229 
0230 private:
0231   mutex_type          m;
0232   insert_counter_type cnt{0};
0233 };
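
/* Sketch of the intended usage (illustrative only):
 *
 *   group_access ga;
 *   {auto lck=ga.shared_access();}      // lookup: double-check match
 *   auto c0=ga.insert_counter().load(); // optimistic insertion:
 *   // ... probe, reserve a slot ...
 *   if(ga.insert_counter()++!=c0){
 *     // another insertion raced us: roll back and start over
 *   }
 */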
0234 
0235 template<std::size_t Size>
0236 group_access* dummy_group_accesses()
0237 {
0238   /* Default group_access array to provide to empty containers without
0239    * incurring dynamic allocation. The mutexes won't ever actually be used
0240    * (no successful reduced hash match can occur), and the insertion counters
0241    * won't ever be incremented (insertions won't succeed as capacity()==0).
0242    */
0243 
0244   static group_access accesses[Size];
0245 
0246   return accesses;
0247 }
0248 
0249 /* subclasses table_arrays to add an additional group_access array */
0250 
0251 template<typename Value,typename Group,typename SizePolicy,typename Allocator>
0252 struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy,Allocator>
0253 {
0254   using group_access_allocator_type=
0255     typename boost::allocator_rebind<Allocator,group_access>::type;
0256   using group_access_pointer=
0257     typename boost::allocator_pointer<group_access_allocator_type>::type;
0258 
0259   using super=table_arrays<Value,Group,SizePolicy,Allocator>;
0260 
0261   concurrent_table_arrays(const super& arrays,group_access_pointer pga):
0262     super{arrays},group_accesses_{pga}{}
0263 
0264   group_access* group_accesses()const noexcept{
0265     return boost::to_address(group_accesses_);
0266   }
0267 
0268   static concurrent_table_arrays new_(
0269     group_access_allocator_type al,std::size_t n)
0270   {
0271     super x{super::new_(al,n)};
0272     BOOST_TRY{
0273       return new_group_access(al,x);
0274     }
0275     BOOST_CATCH(...){
0276       super::delete_(al,x);
0277       BOOST_RETHROW
0278     }
0279     BOOST_CATCH_END
0280   }
0281 
0282   static void set_group_access(
0283     group_access_allocator_type al,concurrent_table_arrays& arrays)
0284   {
0285     set_group_access(
0286       al,arrays,std::is_same<group_access*,group_access_pointer>{});
0287   }
0288 
0289   static void set_group_access(
0290     group_access_allocator_type al,
0291     concurrent_table_arrays& arrays,
0292     std::false_type /* fancy pointers */)
0293   {
0294     arrays.group_accesses_=
0295       boost::allocator_allocate(al,arrays.groups_size_mask+1);
0296 
0297     for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
0298       ::new (arrays.group_accesses()+i) group_access();
0299     }
0300   }
0301 
0302   static void set_group_access(
0303     group_access_allocator_type al,
0304     concurrent_table_arrays& arrays,
0305     std::true_type /* optimize when elements() is null */)
0306   {
0307     if(!arrays.elements()){
0308       arrays.group_accesses_=
0309         dummy_group_accesses<SizePolicy::min_size()>();
0310     } else {
0311       set_group_access(al,arrays,std::false_type{});
0312     }
0313   }
0314 
0315   static concurrent_table_arrays new_group_access(
0316     group_access_allocator_type al,const super& x)
0317   {
0318     concurrent_table_arrays arrays{x,nullptr};
0319     set_group_access(al,arrays);
0320     return arrays;
0321   }
0322 
0323   static void delete_(
0324     group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
0325   {
0326     delete_group_access(al,arrays);
0327     super::delete_(al,arrays);
0328   }
0329 
0330   static void delete_group_access(
0331     group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
0332   {
0333     if(arrays.elements()){
0334       boost::allocator_deallocate(
0335         al,arrays.group_accesses_,arrays.groups_size_mask+1);
0336     }
0337   }
0338 
0339   group_access_pointer group_accesses_;
0340 };
0341 
0342 struct atomic_size_control
0343 {
0344   static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>);
0345   BOOST_UNORDERED_STATIC_ASSERT(atomic_size_t_size<cacheline_size);
0346 
0347   atomic_size_control(std::size_t ml_,std::size_t size_):
0348     pad0_{},ml{ml_},pad1_{},size{size_}{}
0349   atomic_size_control(const atomic_size_control& x):
0350     pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}
0351 
0352   /* padding to avoid false sharing internally and with surrounding data */
0353 
0354   unsigned char            pad0_[cacheline_size-atomic_size_t_size];
0355   std::atomic<std::size_t> ml;
0356   unsigned char            pad1_[cacheline_size-atomic_size_t_size];
0357   std::atomic<std::size_t> size;
0358 };
0359 
0360 /* std::swap can't be used on non-assignable atomics */
0361 
0362 inline void
0363 swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y)
0364 {
0365   std::size_t tmp=x;
0366   x=static_cast<std::size_t>(y);
0367   y=tmp;
0368 }
0369 
0370 inline void swap(atomic_size_control& x,atomic_size_control& y)
0371 {
0372   swap_atomic_size_t(x.ml,y.ml);
0373   swap_atomic_size_t(x.size,y.size);
0374 }
0375 
0376 /* foa::concurrent_table serves as the foundation for end-user concurrent
0377  * hash containers.
0378  * 
0379  * The exposed interface (completed by the wrapping containers) is not that
0380  * of a regular container (in fact, it does not model Container as understood
0381  * by the C++ standard):
0382  * 
0383  *   - Iterators are not provided as they are not suitable for concurrent
0384  *     scenarios.
0385  *   - As a consequence, composite operations with regular containers
0386  *     (like, for instance, looking up an element and modifying it), must
0387  *     be provided natively without any intervening iterator/accessor.
0388  *     Visitation is a core concept in this design, either on its own (e.g.
0389  *     visit(k) locates the element with key k *and* accesses it) or as part
0390  *     of a native composite operation (e.g. try_emplace_or_visit). Visitation
0391  *     is constant or mutating depending on whether the table member function
0392  *     used is const or not.
0393  *   - The API provides member functions for all the meaningful composite
0394  *     operations of the form "X (and|or) Y", where X, Y are one of the
0395  *     primitives FIND, ACCESS, INSERT or ERASE.
0396  *   - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based
0397  *     on C++17 stdlib parallel algorithms.
0398  * 
0399  * Consult boost::concurrent_flat_(map|set) docs for the full API reference.
0400  * Heterogeneous lookup is supported by default, that is, without checking for
0401  * any ::is_transparent typedefs --this checking is done by the wrapping
0402  * containers.
0403  *
0404  * Thread-safe concurrency is implemented using a two-level lock system:
0405  * 
0406  *   - A first container-level lock is implemented with an array of
0407  *     rw spinlocks acting as a single rw mutex with very little
0408  *     cache-coherence traffic on read (each thread is assigned a different
0409  *     spinlock in the array). Container-level write locking is only used for
0410  *     rehashing and other container-wide operations (assignment, swap, etc.)
0411  *   - Each group of slots has an associated rw spinlock. A thread holds
0412  *     at most one group lock at any given time. Lookup is implemented in
0413  *     a (groupwise) lock-free manner until a reduced hash match is found, in
0414  *     which case the relevant group is locked and the slot is double-checked
0415  *     for occupancy and compared with the key.
0416  *   - Each group also has an associated so-called insertion counter used for
0417  *     the following optimistic insertion algorithm:
0418  *     - The value of the insertion counter for the initial group in the probe
0419  *       sequence is locally recorded (let's call this value c0).
0420  *     - Lookup is as described above. If lookup finds no equivalent element,
0421  *       the search for an available slot for insertion successively
0422  *       locks/unlocks each group in the probing sequence.
0423  *     - When an available slot is located, it is preemptively occupied (its
0424  *       reduced hash value is set) and the insertion counter is atomically
0425  *       incremented: if no other thread has incremented the counter during the
0426  *       whole operation (which is checked by comparing with c0), then we're
0427  *       good to go and complete the insertion, otherwise we roll back and
0428  *       start over.
0429  */
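
/* Usage sketch of the visitation-based API as exposed by the wrapping
 * containers (illustrative, hedged example; consult the official
 * boost::concurrent_flat_(map|set) docs for the exact interface):
 *
 *   boost::concurrent_flat_map<int,std::string> m;
 *   m.try_emplace(1,"one");                        // INSERT
 *   m.visit(1,[](auto& x){x.second+="!";});        // FIND and ACCESS
 *   m.insert_or_visit({1,"uno"},[](auto& x){       // INSERT or ACCESS
 *     x.second="uno";});
 *   m.erase_if([](auto& x){return x.first%2==0;}); // conditional ERASE
 */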
0430 
0431 template<typename,typename,typename,typename>
0432 class table; /* concurrent/non-concurrent interop */
0433 
0434 template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
0435 using concurrent_table_core_impl=table_core<
0436   TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
0437   atomic_size_control,Hash,Pred,Allocator>;
0438 
0439 #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
0440 
0441 #if defined(BOOST_MSVC)
0442 #pragma warning(push)
0443 #pragma warning(disable:4714) /* marked as __forceinline not inlined */
0444 #endif
0445 
0446 template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
0447 class concurrent_table:
0448   concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>
0449 {
0450   using super=concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>;
0451   using type_policy=typename super::type_policy;
0452   using group_type=typename super::group_type;
0453   using super::N;
0454   using prober=typename super::prober;
0455   using arrays_type=typename super::arrays_type;
0456   using size_ctrl_type=typename super::size_ctrl_type;
0457   using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
0458   friend compatible_nonconcurrent_table;
0459 
0460 public:
0461   using key_type=typename super::key_type;
0462   using init_type=typename super::init_type;
0463   using value_type=typename super::value_type;
0464   using element_type=typename super::element_type;
0465   using hasher=typename super::hasher;
0466   using key_equal=typename super::key_equal;
0467   using allocator_type=typename super::allocator_type;
0468   using size_type=typename super::size_type;
0469   static constexpr std::size_t bulk_visit_size=16;
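
  /* Bulk visitation: keys in [first,last) are looked up in chunks (see
   * bulk_visit_impl below) so that hashing, group matching and element
   * access can be pipelined. Hypothetical wrapper-level usage sketch:
   *
   *   std::vector<int> keys={1,2,3};
   *   m.visit(keys.begin(),keys.end(),[](auto& x){++x.second;});
   */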
0470 
0471 private:
0472   template<typename Value,typename T>
0473   using enable_if_is_value_type=typename std::enable_if<
0474     !std::is_same<init_type,value_type>::value&&
0475     std::is_same<Value,value_type>::value,
0476     T
0477   >::type;
0478 
0479 public:
0480   concurrent_table(
0481     std::size_t n=default_bucket_count,const Hash& h_=Hash(),
0482     const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
0483     super{n,h_,pred_,al_}
0484     {}
0485 
0486   concurrent_table(const concurrent_table& x):
0487     concurrent_table(x,x.exclusive_access()){}
0488   concurrent_table(concurrent_table&& x):
0489     concurrent_table(std::move(x),x.exclusive_access()){}
0490   concurrent_table(const concurrent_table& x,const Allocator& al_):
0491     concurrent_table(x,al_,x.exclusive_access()){}
0492   concurrent_table(concurrent_table&& x,const Allocator& al_):
0493     concurrent_table(std::move(x),al_,x.exclusive_access()){}
0494 
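  /* Interop constructor: adopts the arrays of a compatible non-concurrent
   * table, leaving x holding the (empty) arrays in ah with size()==0.
   */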
0495   template<typename ArraysType>
0496   concurrent_table(
0497     compatible_nonconcurrent_table&& x,
0498     arrays_holder<ArraysType,Allocator>&& ah):
0499     super{
0500       std::move(x.h()),std::move(x.pred()),std::move(x.al()),
0501       [&x]{return arrays_type::new_group_access(
0502         x.al(),typename arrays_type::super{
0503           x.arrays.groups_size_index,x.arrays.groups_size_mask,
0504           to_pointer<typename arrays_type::group_type_pointer>(
0505             reinterpret_cast<group_type*>(x.arrays.groups())),
0506           x.arrays.elements_});},
0507       size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
0508   {
0509     x.arrays=ah.release();
0510     x.size_ctrl.ml=x.initial_max_load();
0511     x.size_ctrl.size=0;
0512   }
0513 
0514   concurrent_table(compatible_nonconcurrent_table&& x):
0515     concurrent_table(std::move(x),x.make_empty_arrays())
0516   {}
0517 
0518   ~concurrent_table()=default;
0519 
0520   concurrent_table& operator=(const concurrent_table& x)
0521   {
0522     auto lck=exclusive_access(*this,x);
0523     super::operator=(x);
0524     return *this;
0525   }
0526 
0527   concurrent_table& operator=(concurrent_table&& x)noexcept(
0528     noexcept(std::declval<super&>() = std::declval<super&&>()))
0529   {
0530     auto lck=exclusive_access(*this,x);
0531     super::operator=(std::move(x));
0532     return *this;
0533   }
0534 
0535   concurrent_table& operator=(std::initializer_list<value_type> il) {
0536     auto lck=exclusive_access();
0537     super::clear();
0538     super::noshrink_reserve(il.size());
0539     for (auto const& v : il) {
0540       this->unprotected_emplace(v);
0541     }
0542     return *this;
0543   }
0544 
0545   allocator_type get_allocator()const noexcept
0546   {
0547     auto lck=shared_access();
0548     return super::get_allocator();
0549   }
0550 
0551   template<typename Key,typename F>
0552   BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)
0553   {
0554     return visit_impl(group_exclusive{},x,std::forward<F>(f));
0555   }
0556 
0557   template<typename Key,typename F>
0558   BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)const
0559   {
0560     return visit_impl(group_shared{},x,std::forward<F>(f));
0561   }
0562 
0563   template<typename Key,typename F>
0564   BOOST_FORCEINLINE std::size_t cvisit(const Key& x,F&& f)const
0565   {
0566     return visit(x,std::forward<F>(f));
0567   }
0568 
0569   template<typename FwdIterator,typename F>
0570   BOOST_FORCEINLINE
0571   std::size_t visit(FwdIterator first,FwdIterator last,F&& f)
0572   {
0573     return bulk_visit_impl(group_exclusive{},first,last,std::forward<F>(f));
0574   }
0575 
0576   template<typename FwdIterator,typename F>
0577   BOOST_FORCEINLINE
0578   std::size_t visit(FwdIterator first,FwdIterator last,F&& f)const
0579   {
0580     return bulk_visit_impl(group_shared{},first,last,std::forward<F>(f));
0581   }
0582 
0583   template<typename FwdIterator,typename F>
0584   BOOST_FORCEINLINE
0585   std::size_t cvisit(FwdIterator first,FwdIterator last,F&& f)const
0586   {
0587     return visit(first,last,std::forward<F>(f));
0588   }
0589 
0590   template<typename F> std::size_t visit_all(F&& f)
0591   {
0592     return visit_all_impl(group_exclusive{},std::forward<F>(f));
0593   }
0594 
0595   template<typename F> std::size_t visit_all(F&& f)const
0596   {
0597     return visit_all_impl(group_shared{},std::forward<F>(f));
0598   }
0599 
0600   template<typename F> std::size_t cvisit_all(F&& f)const
0601   {
0602     return visit_all(std::forward<F>(f));
0603   }
0604 
0605 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0606   template<typename ExecutionPolicy,typename F>
0607   void visit_all(ExecutionPolicy&& policy,F&& f)
0608   {
0609     visit_all_impl(
0610       group_exclusive{},
0611       std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
0612   }
0613 
0614   template<typename ExecutionPolicy,typename F>
0615   void visit_all(ExecutionPolicy&& policy,F&& f)const
0616   {
0617     visit_all_impl(
0618       group_shared{},
0619       std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
0620   }
0621 
0622   template<typename ExecutionPolicy,typename F>
0623   void cvisit_all(ExecutionPolicy&& policy,F&& f)const
0624   {
0625     visit_all(std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
0626   }
0627 #endif
0628 
0629   template<typename F> bool visit_while(F&& f)
0630   {
0631     return visit_while_impl(group_exclusive{},std::forward<F>(f));
0632   }
0633 
0634   template<typename F> bool visit_while(F&& f)const
0635   {
0636     return visit_while_impl(group_shared{},std::forward<F>(f));
0637   }
0638 
0639   template<typename F> bool cvisit_while(F&& f)const
0640   {
0641     return visit_while(std::forward<F>(f));
0642   }
0643 
0644 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0645   template<typename ExecutionPolicy,typename F>
0646   bool visit_while(ExecutionPolicy&& policy,F&& f)
0647   {
0648     return visit_while_impl(
0649       group_exclusive{},
0650       std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
0651   }
0652 
0653   template<typename ExecutionPolicy,typename F>
0654   bool visit_while(ExecutionPolicy&& policy,F&& f)const
0655   {
0656     return visit_while_impl(
0657       group_shared{},
0658       std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
0659   }
0660 
0661   template<typename ExecutionPolicy,typename F>
0662   bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
0663   {
0664     return visit_while(
0665       std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
0666   }
0667 #endif
0668 
0669   bool empty()const noexcept{return size()==0;}
0670   
0671   std::size_t size()const noexcept
0672   {
0673     auto lck=shared_access();
0674     return unprotected_size();
0675   }
0676 
0677   using super::max_size; 
0678 
0679   template<typename... Args>
0680   BOOST_FORCEINLINE bool emplace(Args&&... args)
0681   {
0682     return construct_and_emplace(std::forward<Args>(args)...);
0683   }
0684 
0685   BOOST_FORCEINLINE bool
0686   insert(const init_type& x){return emplace_impl(x);}
0687 
0688   BOOST_FORCEINLINE bool
0689   insert(init_type&& x){return emplace_impl(std::move(x));}
0690 
0691   /* template<typename=void> tilts call ambiguities in favor of init_type */
0692 
0693   template<typename=void>
0694   BOOST_FORCEINLINE bool
0695   insert(const value_type& x){return emplace_impl(x);}
0696   
0697   template<typename=void>
0698   BOOST_FORCEINLINE bool
0699   insert(value_type&& x){return emplace_impl(std::move(x));}
0700 
0701   template<typename Key,typename... Args>
0702   BOOST_FORCEINLINE bool try_emplace(Key&& x,Args&&... args)
0703   {
0704     return emplace_impl(
0705       try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
0706   }
0707 
0708   template<typename Key,typename... Args>
0709   BOOST_FORCEINLINE bool try_emplace_or_visit(Key&& x,Args&&... args)
0710   {
0711     return emplace_or_visit_flast(
0712       group_exclusive{},
0713       try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
0714   }
0715 
0716   template<typename Key,typename... Args>
0717   BOOST_FORCEINLINE bool try_emplace_or_cvisit(Key&& x,Args&&... args)
0718   {
0719     return emplace_or_visit_flast(
0720       group_shared{},
0721       try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
0722   }
0723 
0724   template<typename... Args>
0725   BOOST_FORCEINLINE bool emplace_or_visit(Args&&... args)
0726   {
0727     return construct_and_emplace_or_visit_flast(
0728       group_exclusive{},std::forward<Args>(args)...);
0729   }
0730 
0731   template<typename... Args>
0732   BOOST_FORCEINLINE bool emplace_or_cvisit(Args&&... args)
0733   {
0734     return construct_and_emplace_or_visit_flast(
0735       group_shared{},std::forward<Args>(args)...);
0736   }
0737 
0738   template<typename F>
0739   BOOST_FORCEINLINE bool insert_or_visit(const init_type& x,F&& f)
0740   {
0741     return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
0742   }
0743 
0744   template<typename F>
0745   BOOST_FORCEINLINE bool insert_or_cvisit(const init_type& x,F&& f)
0746   {
0747     return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
0748   }
0749 
0750   template<typename F>
0751   BOOST_FORCEINLINE bool insert_or_visit(init_type&& x,F&& f)
0752   {
0753     return emplace_or_visit_impl(
0754       group_exclusive{},std::forward<F>(f),std::move(x));
0755   }
0756 
0757   template<typename F>
0758   BOOST_FORCEINLINE bool insert_or_cvisit(init_type&& x,F&& f)
0759   {
0760     return emplace_or_visit_impl(
0761       group_shared{},std::forward<F>(f),std::move(x));
0762   }
0763 
0764   /* SFINAE tilts call ambiguities in favor of init_type */
0765 
0766   template<typename Value,typename F>
0767   BOOST_FORCEINLINE auto insert_or_visit(const Value& x,F&& f)
0768     ->enable_if_is_value_type<Value,bool>
0769   {
0770     return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
0771   }
0772 
0773   template<typename Value,typename F>
0774   BOOST_FORCEINLINE auto insert_or_cvisit(const Value& x,F&& f)
0775     ->enable_if_is_value_type<Value,bool>
0776   {
0777     return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
0778   }
0779 
0780   template<typename Value,typename F>
0781   BOOST_FORCEINLINE auto insert_or_visit(Value&& x,F&& f)
0782     ->enable_if_is_value_type<Value,bool>
0783   {
0784     return emplace_or_visit_impl(
0785       group_exclusive{},std::forward<F>(f),std::move(x));
0786   }
0787 
0788   template<typename Value,typename F>
0789   BOOST_FORCEINLINE auto insert_or_cvisit(Value&& x,F&& f)
0790     ->enable_if_is_value_type<Value,bool>
0791   {
0792     return emplace_or_visit_impl(
0793       group_shared{},std::forward<F>(f),std::move(x));
0794   }
0795 
0796   template<typename Key>
0797   BOOST_FORCEINLINE std::size_t erase(const Key& x)
0798   {
0799     return erase_if(x,[](const value_type&){return true;});
0800   }
0801 
0802   template<typename Key,typename F>
0803   BOOST_FORCEINLINE auto erase_if(const Key& x,F&& f)->typename std::enable_if<
0804     !is_execution_policy<Key>::value,std::size_t>::type
0805   {
0806     auto        lck=shared_access();
0807     auto        hash=this->hash_for(x);
0808     std::size_t res=0;
0809     unprotected_internal_visit(
0810       group_exclusive{},x,this->position_for(hash),hash,
0811       [&,this](group_type* pg,unsigned int n,element_type* p)
0812       {
0813         if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
0814           super::erase(pg,n,p);
0815           res=1;
0816         }
0817       });
0818     return res;
0819   }
0820 
0821   template<typename F>
0822   std::size_t erase_if(F&& f)
0823   {
0824     auto        lck=shared_access();
0825     std::size_t res=0;
0826     for_all_elements(
0827       group_exclusive{},
0828       [&,this](group_type* pg,unsigned int n,element_type* p){
0829         if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
0830           super::erase(pg,n,p);
0831           ++res;
0832         }
0833       });
0834     return res;
0835   }
0836 
0837 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0838   template<typename ExecutionPolicy,typename F>
0839   auto erase_if(ExecutionPolicy&& policy,F&& f)->typename std::enable_if<
0840     is_execution_policy<ExecutionPolicy>::value,void>::type
0841   {
0842     auto lck=shared_access();
0843     for_all_elements(
0844       group_exclusive{},std::forward<ExecutionPolicy>(policy),
0845       [&,this](group_type* pg,unsigned int n,element_type* p){
0846         if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
0847           super::erase(pg,n,p);
0848         }
0849       });
0850   }
0851 #endif
0852 
0853   void swap(concurrent_table& x)
0854     noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
0855   {
0856     auto lck=exclusive_access(*this,x);
0857     super::swap(x);
0858   }
0859 
0860   void clear()noexcept
0861   {
0862     auto lck=exclusive_access();
0863     super::clear();
0864   }
0865 
0866   // TODO: should we accept a different allocator too?
0867   template<typename Hash2,typename Pred2>
0868   size_type merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& x)
0869   {
0870     using merge_table_type=concurrent_table<TypePolicy,Hash2,Pred2,Allocator>;
0871     using super2=typename merge_table_type::super;
0872 
0873     // for clang
0874     boost::ignore_unused<super2>();
0875 
0876     auto      lck=exclusive_access(*this,x);
0877     size_type s=super::size();
0878     x.super2::for_all_elements( /* super2::for_all_elements -> unprotected */
0879       [&,this](group_type* pg,unsigned int n,element_type* p){
0880         typename merge_table_type::erase_on_exit e{x,pg,n,p};
0881         if(!unprotected_emplace(type_policy::move(*p)))e.rollback();
0882       });
0883     return size_type{super::size()-s};
0884   }
0885 
0886   template<typename Hash2,typename Pred2>
0887   void merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}
0888 
0889   hasher hash_function()const
0890   {
0891     auto lck=shared_access();
0892     return super::hash_function();
0893   }
0894 
0895   key_equal key_eq()const
0896   {
0897     auto lck=shared_access();
0898     return super::key_eq();
0899   }
0900 
0901   template<typename Key>
0902   BOOST_FORCEINLINE std::size_t count(Key&& x)const
0903   {
0904     return (std::size_t)contains(std::forward<Key>(x));
0905   }
0906 
0907   template<typename Key>
0908   BOOST_FORCEINLINE bool contains(Key&& x)const
0909   {
0910     return visit(std::forward<Key>(x),[](const value_type&){})!=0;
0911   }
0912 
0913   std::size_t capacity()const noexcept
0914   {
0915     auto lck=shared_access();
0916     return super::capacity();
0917   }
0918 
0919   float load_factor()const noexcept
0920   {
0921     auto lck=shared_access();
0922     if(super::capacity()==0)return 0;
0923     else                    return float(unprotected_size())/
0924                                    float(super::capacity());
0925   }
0926 
0927   using super::max_load_factor;
0928 
0929   std::size_t max_load()const noexcept
0930   {
0931     auto lck=shared_access();
0932     return super::max_load();
0933   }
0934 
0935   void rehash(std::size_t n)
0936   {
0937     auto lck=exclusive_access();
0938     super::rehash(n);
0939   }
0940 
0941   void reserve(std::size_t n)
0942   {
0943     auto lck=exclusive_access();
0944     super::reserve(n);
0945   }
0946 
0947   template<typename Predicate>
0948   friend std::size_t erase_if(concurrent_table& x,Predicate&& pr)
0949   {
0950     return x.erase_if(std::forward<Predicate>(pr));
0951   }
0952 
0953   friend bool operator==(const concurrent_table& x,const concurrent_table& y)
0954   {
0955     auto lck=exclusive_access(x,y);
0956     return static_cast<const super&>(x)==static_cast<const super&>(y);
0957   }
0958 
0959   friend bool operator!=(const concurrent_table& x,const concurrent_table& y)
0960   {
0961     return !(x==y);
0962   }
0963 
0964 private:
0965   template<typename,typename,typename,typename> friend class concurrent_table;
0966 
0967   using mutex_type=rw_spinlock;
0968   using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine
0969   using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>;
0970   using exclusive_lock_guard=reentrancy_checked<lock_guard<multimutex_type>>;
0971   using exclusive_bilock_guard=
0972     reentrancy_bichecked<scoped_bilock<multimutex_type>>;
0973   using group_shared_lock_guard=typename group_access::shared_lock_guard;
0974   using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard;
0975   using group_insert_counter_type=typename group_access::insert_counter_type;
0976 
0977   concurrent_table(const concurrent_table& x,exclusive_lock_guard):
0978     super{x}{}
0979   concurrent_table(concurrent_table&& x,exclusive_lock_guard):
0980     super{std::move(x)}{}
0981   concurrent_table(
0982     const concurrent_table& x,const Allocator& al_,exclusive_lock_guard):
0983     super{x,al_}{}
0984   concurrent_table(
0985     concurrent_table&& x,const Allocator& al_,exclusive_lock_guard):
0986     super{std::move(x),al_}{}
0987 
0988   inline shared_lock_guard shared_access()const
0989   {
0990     thread_local auto id=(++thread_counter)%mutexes.size();
0991 
0992     return shared_lock_guard{this,mutexes[id]};
0993   }
0994 
0995   inline exclusive_lock_guard exclusive_access()const
0996   {
0997     return exclusive_lock_guard{this,mutexes};
0998   }
0999 
1000   static inline exclusive_bilock_guard exclusive_access(
1001     const concurrent_table& x,const concurrent_table& y)
1002   {
1003     return {&x,&y,x.mutexes,y.mutexes};
1004   }
1005 
1006   template<typename Hash2,typename Pred2>
1007   static inline exclusive_bilock_guard exclusive_access(
1008     const concurrent_table& x,
1009     const concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& y)
1010   {
1011     return {&x,&y,x.mutexes,y.mutexes};
1012   }
1013 
1014   /* Tag-dispatched shared/exclusive group access */
1015 
1016   using group_shared=std::false_type;
1017   using group_exclusive=std::true_type;
1018 
1019   inline group_shared_lock_guard access(group_shared,std::size_t pos)const
1020   {
1021     return this->arrays.group_accesses()[pos].shared_access();
1022   }
1023 
1024   inline group_exclusive_lock_guard access(
1025     group_exclusive,std::size_t pos)const
1026   {
1027     return this->arrays.group_accesses()[pos].exclusive_access();
1028   }
1029 
1030   inline group_insert_counter_type& insert_counter(std::size_t pos)const
1031   {
1032     return this->arrays.group_accesses()[pos].insert_counter();
1033   }
1034 
1035   /* Const casts value_type& according to the level of group access for
1036    * safe passing to visitation functions. When type_policy is set-like,
1037    * access is always const regardless of group access.
1038    */
1039 
1040   static inline const value_type&
1041   cast_for(group_shared,value_type& x){return x;}
1042 
1043   static inline typename std::conditional<
1044     std::is_same<key_type,value_type>::value,
1045     const value_type&,
1046     value_type&
1047   >::type
1048   cast_for(group_exclusive,value_type& x){return x;}
1049 
1050   struct erase_on_exit
1051   {
1052     erase_on_exit(
1053       concurrent_table& x_,
1054       group_type* pg_,unsigned int pos_,element_type* p_):
1055       x(x_),pg(pg_),pos(pos_),p(p_){}
1056     ~erase_on_exit(){if(!rollback_)x.super::erase(pg,pos,p);}
1057 
1058     void rollback(){rollback_=true;}
1059 
1060     concurrent_table &x;
1061     group_type       *pg;
1062     unsigned int      pos;
1063     element_type     *p;
1064     bool              rollback_=false;
1065   };
1066 
1067   template<typename GroupAccessMode,typename Key,typename F>
1068   BOOST_FORCEINLINE std::size_t visit_impl(
1069     GroupAccessMode access_mode,const Key& x,F&& f)const
1070   {
1071     auto lck=shared_access();
1072     auto hash=this->hash_for(x);
1073     return unprotected_visit(
1074       access_mode,x,this->position_for(hash),hash,std::forward<F>(f));
1075   }
1076 
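  /* Keys are consumed in chunks of bulk_visit_size, except that a final
   * run of fewer than 2*bulk_visit_size keys is taken whole: e.g., with
   * bulk_visit_size==16, n==17 yields a single chunk of 17 rather than
   * 16+1, which is why unprotected_bulk_visit's scratch arrays hold
   * 2*bulk_visit_size-1 entries.
   */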
1077   template<typename GroupAccessMode,typename FwdIterator,typename F>
1078   BOOST_FORCEINLINE
1079   std::size_t bulk_visit_impl(
1080     GroupAccessMode access_mode,FwdIterator first,FwdIterator last,F&& f)const
1081   {
1082     auto        lck=shared_access();
1083     std::size_t res=0;
1084     auto        n=static_cast<std::size_t>(std::distance(first,last));
1085     while(n){
1086       auto m=n<2*bulk_visit_size?n:bulk_visit_size;
1087       res+=unprotected_bulk_visit(access_mode,first,m,std::forward<F>(f));
1088       n-=m;
1089       std::advance(
1090         first,
1091         static_cast<
1092           typename std::iterator_traits<FwdIterator>::difference_type>(m));
1093     }
1094     return res;
1095   }
1096 
1097   template<typename GroupAccessMode,typename F>
1098   std::size_t visit_all_impl(GroupAccessMode access_mode,F&& f)const
1099   {
1100     auto lck=shared_access();
1101     std::size_t res=0;
1102     for_all_elements(access_mode,[&](element_type* p){
1103       f(cast_for(access_mode,type_policy::value_from(*p)));
1104       ++res;
1105     });
1106     return res;
1107   }
1108 
1109 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
1110   template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
1111   void visit_all_impl(
1112     GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
1113   {
1114     auto lck=shared_access();
1115     for_all_elements(
1116       access_mode,std::forward<ExecutionPolicy>(policy),
1117       [&](element_type* p){
1118         f(cast_for(access_mode,type_policy::value_from(*p)));
1119       });
1120   }
1121 #endif
1122 
1123   template<typename GroupAccessMode,typename F>
1124   bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
1125   {
1126     auto lck=shared_access();
1127     return for_all_elements_while(access_mode,[&](element_type* p){
1128       return f(cast_for(access_mode,type_policy::value_from(*p)));
1129     });
1130   }
1131 
1132 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
1133   template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
1134   bool visit_while_impl(
1135     GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
1136   {
1137     auto lck=shared_access();
1138     return for_all_elements_while(
1139       access_mode,std::forward<ExecutionPolicy>(policy),
1140       [&](element_type* p){
1141         return f(cast_for(access_mode,type_policy::value_from(*p)));
1142       });
1143   }
1144 #endif
1145 
1146   template<typename GroupAccessMode,typename Key,typename F>
1147   BOOST_FORCEINLINE std::size_t unprotected_visit(
1148     GroupAccessMode access_mode,
1149     const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
1150   {
1151     return unprotected_internal_visit(
1152       access_mode,x,pos0,hash,
1153       [&](group_type*,unsigned int,element_type* p)
1154         {f(cast_for(access_mode,type_policy::value_from(*p)));});
1155   }
1156 
1157 #if defined(BOOST_MSVC)
1158 /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */
1159 #pragma warning(push)
1160 #pragma warning(disable:4800)
1161 #endif
1162 
1163   template<typename GroupAccessMode,typename Key,typename F>
1164   BOOST_FORCEINLINE std::size_t unprotected_internal_visit(
1165     GroupAccessMode access_mode,
1166     const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
1167   {
1168     prober pb(pos0);
1169     do{
1170       auto pos=pb.get();
1171       auto pg=this->arrays.groups()+pos;
1172       auto mask=pg->match(hash);
1173       if(mask){
1174         auto p=this->arrays.elements()+pos*N;
1175         BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
1176         auto lck=access(access_mode,pos);
1177         do{
1178           auto n=unchecked_countr_zero(mask);
1179           if(BOOST_LIKELY(
1180             pg->is_occupied(n)&&bool(this->pred()(x,this->key_from(p[n]))))){
1181             f(pg,n,p+n);
1182             return 1;
1183           }
1184           mask&=mask-1;
1185         }while(mask);
1186       }
1187       if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
1188         return 0;
1189       }
1190     }
1191     while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask)));
1192     return 0;
1193   }
1194 
1195   template<typename GroupAccessMode,typename FwdIterator,typename F>
1196   BOOST_FORCEINLINE std::size_t unprotected_bulk_visit(
1197     GroupAccessMode access_mode,FwdIterator first,std::size_t m,F&& f)const
1198   {
1199     BOOST_ASSERT(m<2*bulk_visit_size);
1200 
1201     std::size_t res=0,
1202                 hashes[2*bulk_visit_size-1],
1203                 positions[2*bulk_visit_size-1];
1204     int         masks[2*bulk_visit_size-1];
1205     auto        it=first;
1206 
1207     for(auto i=m;i--;++it){
1208       auto hash=hashes[i]=this->hash_for(*it);
1209       auto pos=positions[i]=this->position_for(hash);
1210       BOOST_UNORDERED_PREFETCH(this->arrays.groups()+pos);
1211     }
1212 
1213     for(auto i=m;i--;){
1214       auto hash=hashes[i];
1215       auto pos=positions[i];
1216       auto mask=masks[i]=(this->arrays.groups()+pos)->match(hash);
1217       if(mask){
1218         BOOST_UNORDERED_PREFETCH(this->arrays.group_accesses()+pos);
1219         BOOST_UNORDERED_PREFETCH(
1220           this->arrays.elements()+pos*N+unchecked_countr_zero(mask));
1221       }
1222     }
1223 
1224     it=first;
1225     for(auto i=m;i--;++it){
1226       auto          pos=positions[i];
1227       prober        pb(pos);
1228       auto          pg=this->arrays.groups()+pos;
1229       auto          mask=masks[i];
1230       element_type *p;
1231       if(!mask)goto post_mask;
1232       p=this->arrays.elements()+pos*N;
1233       for(;;){
1234         {
1235           auto lck=access(access_mode,pos);
1236           do{
1237             auto n=unchecked_countr_zero(mask);
1238             if(BOOST_LIKELY(
1239               pg->is_occupied(n)&&
1240               bool(this->pred()(*it,this->key_from(p[n]))))){
1241               f(cast_for(access_mode,type_policy::value_from(p[n])));
1242               ++res;
1243               goto next_key;
1244             }
1245             mask&=mask-1;
1246           }while(mask);
1247         }
1248       post_mask:
1249         do{
1250           if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))||
1251              BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){
1252             goto next_key;
1253           }
1254           pos=pb.get();
1255           pg=this->arrays.groups()+pos;
1256           mask=pg->match(hashes[i]);
1257         }while(!mask);
1258         p=this->arrays.elements()+pos*N;
1259         BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
1260       }
1261       next_key:;
1262     }
1263     return res;
1264   }
1265 
1266 #if defined(BOOST_MSVC)
1267 #pragma warning(pop) /* C4800 */
1268 #endif
1269 
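  /* size_ctrl.size may transiently overshoot size_ctrl.ml while speculative
   * insertions (reserve_size) are in flight, so the reported size is
   * clamped to the maximum load.
   */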
1270   std::size_t unprotected_size()const
1271   {
1272     std::size_t m=this->size_ctrl.ml;
1273     std::size_t s=this->size_ctrl.size;
1274     return s<=m?s:m;
1275   }
1276 
1277   template<typename... Args>
1278   BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args)
1279   {
1280     return construct_and_emplace_or_visit(
1281       group_shared{},[](const value_type&){},std::forward<Args>(args)...);
1282   }
1283 
1284   struct call_construct_and_emplace_or_visit
1285   {
1286     template<typename... Args>
1287     BOOST_FORCEINLINE bool operator()(
1288       concurrent_table* this_,Args&&... args)const
1289     {
1290       return this_->construct_and_emplace_or_visit(
1291         std::forward<Args>(args)...);
1292     }
1293   };
1294 
1295   template<typename GroupAccessMode,typename... Args>
1296   BOOST_FORCEINLINE bool construct_and_emplace_or_visit_flast(
1297     GroupAccessMode access_mode,Args&&... args)
1298   {
1299     return mp11::tuple_apply(
1300       call_construct_and_emplace_or_visit{},
1301       std::tuple_cat(
1302         std::make_tuple(this,access_mode),
1303         tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
1304       )
1305     );
1306   }
1307 
1308   template<typename GroupAccessMode,typename F,typename... Args>
1309   BOOST_FORCEINLINE bool construct_and_emplace_or_visit(
1310     GroupAccessMode access_mode,F&& f,Args&&... args)
1311   {
1312     auto lck=shared_access();
1313 
1314     auto x=alloc_make_insert_type<type_policy>(
1315       this->al(),std::forward<Args>(args)...);
1316     int res=unprotected_norehash_emplace_or_visit(
1317       access_mode,std::forward<F>(f),type_policy::move(x.value()));
1318     if(BOOST_LIKELY(res>=0))return res!=0;
1319 
1320     lck.unlock();
1321 
1322     rehash_if_full();
1323     return noinline_emplace_or_visit(
1324       access_mode,std::forward<F>(f),type_policy::move(x.value()));
1325   }
1326 
1327   template<typename... Args>
1328   BOOST_FORCEINLINE bool emplace_impl(Args&&... args)
1329   {
1330     return emplace_or_visit_impl(
1331       group_shared{},[](const value_type&){},std::forward<Args>(args)...);
1332   }
1333 
1334   template<typename GroupAccessMode,typename F,typename... Args>
1335   BOOST_NOINLINE bool noinline_emplace_or_visit(
1336     GroupAccessMode access_mode,F&& f,Args&&... args)
1337   {
1338     return emplace_or_visit_impl(
1339       access_mode,std::forward<F>(f),std::forward<Args>(args)...);
1340   }
1341 
1342   struct call_emplace_or_visit_impl
1343   {
1344     template<typename... Args>
1345     BOOST_FORCEINLINE bool operator()(
1346       concurrent_table* this_,Args&&... args)const
1347     {
1348       return this_->emplace_or_visit_impl(std::forward<Args>(args)...);
1349     }
1350   };
1351 
1352   template<typename GroupAccessMode,typename... Args>
1353   BOOST_FORCEINLINE bool emplace_or_visit_flast(
1354     GroupAccessMode access_mode,Args&&... args)
1355   {
1356     return mp11::tuple_apply(
1357       call_emplace_or_visit_impl{},
1358       std::tuple_cat(
1359         std::make_tuple(this,access_mode),
1360         tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
1361       )
1362     );
1363   }
1364 
1365   template<typename GroupAccessMode,typename F,typename... Args>
1366   BOOST_FORCEINLINE bool emplace_or_visit_impl(
1367     GroupAccessMode access_mode,F&& f,Args&&... args)
1368   {
1369     for(;;){
1370       {
1371         auto lck=shared_access();
1372         int res=unprotected_norehash_emplace_or_visit(
1373           access_mode,std::forward<F>(f),std::forward<Args>(args)...);
1374         if(BOOST_LIKELY(res>=0))return res!=0;
1375       }
1376       rehash_if_full();
1377     }
1378   }
1379 
1380   template<typename... Args>
1381   BOOST_FORCEINLINE bool unprotected_emplace(Args&&... args)
1382   {
1383     const auto &k=this->key_from(std::forward<Args>(args)...);
1384     auto        hash=this->hash_for(k);
1385     auto        pos0=this->position_for(hash);
1386 
1387     if(this->find(k,pos0,hash))return false;
1388 
1389     if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
1390       this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...);
1391     }
1392     else{
1393       this->unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...);
1394     }
1395     return true;
1396   }
1397 
1398   struct reserve_size
1399   {
1400     reserve_size(concurrent_table& x_):x(x_)
1401     {
1402       size_=++x.size_ctrl.size;
1403     }
1404 
1405     ~reserve_size()
1406     {
1407       if(!commit_)--x.size_ctrl.size;
1408     }
1409 
1410     bool succeeded()const{return size_<=x.size_ctrl.ml;}
1411 
1412     void commit(){commit_=true;}
1413 
1414     concurrent_table &x;
1415     std::size_t       size_;
1416     bool              commit_=false;
1417   };
1418 
1419   struct reserve_slot
1420   {
1421     reserve_slot(group_type* pg_,std::size_t pos_,std::size_t hash):
1422       pg{pg_},pos{pos_}
1423     {
1424       pg->set(pos,hash);
1425     }
1426 
1427     ~reserve_slot()
1428     {
1429       if(!commit_)pg->reset(pos);
1430     }
1431 
1432     void commit(){commit_=true;}
1433 
1434     group_type  *pg;
1435     std::size_t pos;
1436     bool        commit_=false;
1437   };
1438 
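  /* Optimistic insertion as described in the design notes above.
   * Returns 1 if a new element was inserted, 0 if an equivalent element
   * was found and visited, and -1 if the maximum load was reached and a
   * rehash (done by the caller) is required.
   */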
1439   template<typename GroupAccessMode,typename F,typename... Args>
1440   BOOST_FORCEINLINE int
1441   unprotected_norehash_emplace_or_visit(
1442     GroupAccessMode access_mode,F&& f,Args&&... args)
1443   {
1444     const auto &k=this->key_from(std::forward<Args>(args)...);
1445     auto        hash=this->hash_for(k);
1446     auto        pos0=this->position_for(hash);
1447 
1448     for(;;){
1449     startover:
1450       boost::uint32_t counter=insert_counter(pos0);
1451       if(unprotected_visit(
1452         access_mode,k,pos0,hash,std::forward<F>(f)))return 0;
1453 
1454       reserve_size rsize(*this);
1455       if(BOOST_LIKELY(rsize.succeeded())){
1456         for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
1457           auto pos=pb.get();
1458           auto pg=this->arrays.groups()+pos;
1459           auto lck=access(group_exclusive{},pos);
1460           auto mask=pg->match_available();
1461           if(BOOST_LIKELY(mask!=0)){
1462             auto n=unchecked_countr_zero(mask);
1463             reserve_slot rslot{pg,n,hash};
1464             if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){
1465             /* another thread inserted from pos0; need to start over */
1466               goto startover;
1467             }
1468             auto p=this->arrays.elements()+pos*N+n;
1469             this->construct_element(p,std::forward<Args>(args)...);
1470             rslot.commit();
1471             rsize.commit();
1472             return 1;
1473           }
1474           pg->mark_overflow(hash);
1475         }
1476       }
1477       else return -1;
1478     }
1479   }
1480 
1481   void rehash_if_full()
1482   {
1483     auto lck=exclusive_access();
1484     if(this->size_ctrl.size==this->size_ctrl.ml){
1485       this->unchecked_rehash_for_growth();
1486     }
1487   }
1488 
1489   template<typename GroupAccessMode,typename F>
1490   auto for_all_elements(GroupAccessMode access_mode,F f)const
1491     ->decltype(f(nullptr),void())
1492   {
1493     for_all_elements(
1494       access_mode,[&](group_type*,unsigned int,element_type* p){f(p);});
1495   }
1496 
1497   template<typename GroupAccessMode,typename F>
1498   auto for_all_elements(GroupAccessMode access_mode,F f)const
1499     ->decltype(f(nullptr,0,nullptr),void())
1500   {
1501     for_all_elements_while(
1502       access_mode,[&](group_type* pg,unsigned int n,element_type* p)
1503         {f(pg,n,p);return true;});
1504   }
1505 
1506   template<typename GroupAccessMode,typename F>
1507   auto for_all_elements_while(GroupAccessMode access_mode,F f)const
1508     ->decltype(f(nullptr),bool())
1509   {
1510     return for_all_elements_while(
1511       access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
1512   }
1513 
1514   template<typename GroupAccessMode,typename F>
1515   auto for_all_elements_while(GroupAccessMode access_mode,F f)const
1516     ->decltype(f(nullptr,0,nullptr),bool())
1517   {
1518     auto p=this->arrays.elements();
1519     if(p){
1520       for(auto pg=this->arrays.groups(),last=pg+this->arrays.groups_size_mask+1;
1521           pg!=last;++pg,p+=N){
1522         auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups()));
1523         auto mask=this->match_really_occupied(pg,last);
1524         while(mask){
1525           auto n=unchecked_countr_zero(mask);
1526           if(!f(pg,n,p+n))return false;
1527           mask&=mask-1;
1528         }
1529       }
1530     }
1531     return true;
1532   }
1533 
1534 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
1535   template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
1536   auto for_all_elements(
1537     GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
1538     ->decltype(f(nullptr),void())
1539   {
1540     for_all_elements(
1541       access_mode,std::forward<ExecutionPolicy>(policy),
1542       [&](group_type*,unsigned int,element_type* p){f(p);});
1543   }
1544 
1545   template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
1546   auto for_all_elements(
1547     GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
1548     ->decltype(f(nullptr,0,nullptr),void())
1549   {
1550     if(!this->arrays.elements())return;
1551     auto first=this->arrays.groups(),
1552          last=first+this->arrays.groups_size_mask+1;
1553     std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
1554       [&,this](group_type& g){
1555         auto pos=static_cast<std::size_t>(&g-first);
1556         auto p=this->arrays.elements()+pos*N;
1557         auto lck=access(access_mode,pos);
1558         auto mask=this->match_really_occupied(&g,last);
1559         while(mask){
1560           auto n=unchecked_countr_zero(mask);
1561           f(&g,n,p+n);
1562           mask&=mask-1;
1563         }
1564       }
1565     );
1566   }
1567 
1568   template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
1569   bool for_all_elements_while(
1570     GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
1571   {
1572     if(!this->arrays.elements())return true;
1573     auto first=this->arrays.groups(),
1574          last=first+this->arrays.groups_size_mask+1;
1575     return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
1576       [&,this](group_type& g){
1577         auto pos=static_cast<std::size_t>(&g-first);
1578         auto p=this->arrays.elements()+pos*N;
1579         auto lck=access(access_mode,pos);
1580         auto mask=this->match_really_occupied(&g,last);
1581         while(mask){
1582           auto n=unchecked_countr_zero(mask);
1583           if(!f(p+n))return false;
1584           mask&=mask-1;
1585         }
1586         return true;
1587       }
1588     );
1589   }
1590 #endif
1591 
1592   friend class boost::serialization::access;
1593 
1594   template<typename Archive>
1595   void serialize(Archive& ar,unsigned int version)
1596   {
1597     core::split_member(ar,*this,version);
1598   }
1599 
1600   template<typename Archive>
1601   void save(Archive& ar,unsigned int version)const
1602   {
1603     save(
1604       ar,version,
1605       std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
1606   }
1607 
1608   template<typename Archive>
1609   void save(Archive& ar,unsigned int,std::true_type /* set */)const
1610   {
1611     auto                                    lck=exclusive_access();
1612     const std::size_t                       s=super::size();
1613     const serialization_version<value_type> value_version;
1614 
1615     ar<<core::make_nvp("count",s);
1616     ar<<core::make_nvp("value_version",value_version);
1617 
1618     super::for_all_elements([&,this](element_type* p){
1619       auto& x=type_policy::value_from(*p);
1620       core::save_construct_data_adl(ar,std::addressof(x),value_version);
1621       ar<<serialization::make_nvp("item",x);
1622     });
1623   }
1624 
1625   template<typename Archive>
1626   void save(Archive& ar,unsigned int,std::false_type /* map */)const
1627   {
1628     using raw_key_type=typename std::remove_const<key_type>::type;
1629     using raw_mapped_type=typename std::remove_const<
1630       typename TypePolicy::mapped_type>::type;
1631 
1632     auto                                         lck=exclusive_access();
1633     const std::size_t                            s=super::size();
1634     const serialization_version<raw_key_type>    key_version;
1635     const serialization_version<raw_mapped_type> mapped_version;
1636 
1637     ar<<core::make_nvp("count",s);
1638     ar<<core::make_nvp("key_version",key_version);
1639     ar<<core::make_nvp("mapped_version",mapped_version);
1640 
1641     super::for_all_elements([&,this](element_type* p){
1642       /* To remain lib-independent from Boost.Serialization and not rely on
1643        * the user having included the serialization code for std::pair
1644        * (boost/serialization/utility.hpp), we serialize the key and the
1645        * mapped value separately.
1646        */
1647 
1648       auto& x=type_policy::value_from(*p);
1649       core::save_construct_data_adl(
1650         ar,std::addressof(x.first),key_version);
1651       ar<<serialization::make_nvp("key",x.first);
1652       core::save_construct_data_adl(
1653         ar,std::addressof(x.second),mapped_version);
1654       ar<<serialization::make_nvp("mapped",x.second);
1655     });
1656   }
1657 
1658   template<typename Archive>
1659   void load(Archive& ar,unsigned int version)
1660   {
1661     load(
1662       ar,version,
1663       std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
1664   }
1665 
1666   template<typename Archive>
1667   void load(Archive& ar,unsigned int,std::true_type /* set */)
1668   {
1669     auto                              lck=exclusive_access();
1670     std::size_t                       s;
1671     serialization_version<value_type> value_version;
1672 
1673     ar>>core::make_nvp("count",s);
1674     ar>>core::make_nvp("value_version",value_version);
1675 
1676     super::clear();
1677     super::reserve(s);
1678 
1679     for(std::size_t n=0;n<s;++n){
1680       archive_constructed<value_type> value("item",ar,value_version);
1681       auto&                           x=value.get();
1682       auto                            hash=this->hash_for(x);
1683       auto                            pos0=this->position_for(hash);
1684 
1685       if(this->find(x,pos0,hash))throw_exception(bad_archive_exception());
1686       auto loc=this->unchecked_emplace_at(pos0,hash,std::move(x));
1687       ar.reset_object_address(std::addressof(*loc.p),std::addressof(x));
1688     }
1689   }
1690 
1691   template<typename Archive>
1692   void load(Archive& ar,unsigned int,std::false_type /* map */)
1693   {
1694     using raw_key_type=typename std::remove_const<key_type>::type;
1695     using raw_mapped_type=typename std::remove_const<
1696       typename TypePolicy::mapped_type>::type;
1697 
1698     auto                                   lck=exclusive_access();
1699     std::size_t                            s;
1700     serialization_version<raw_key_type>    key_version;
1701     serialization_version<raw_mapped_type> mapped_version;
1702 
1703     ar>>core::make_nvp("count",s);
1704     ar>>core::make_nvp("key_version",key_version);
1705     ar>>core::make_nvp("mapped_version",mapped_version);
1706 
1707     super::clear();
1708     super::reserve(s);
1709 
1710     for(std::size_t n=0;n<s;++n){
1711       archive_constructed<raw_key_type>    key("key",ar,key_version);
1712       archive_constructed<raw_mapped_type> mapped("mapped",ar,mapped_version);
1713       auto&                                k=key.get();
1714       auto&                                m=mapped.get();
1715       auto                                 hash=this->hash_for(k);
1716       auto                                 pos0=this->position_for(hash);
1717 
1718       if(this->find(k,pos0,hash))throw_exception(bad_archive_exception());
1719       auto loc=this->unchecked_emplace_at(pos0,hash,std::move(k),std::move(m));
1720       ar.reset_object_address(std::addressof(loc.p->first),std::addressof(k));
1721       ar.reset_object_address(std::addressof(loc.p->second),std::addressof(m));
1722     }
1723   }
1724 
1725   static std::atomic<std::size_t> thread_counter;
1726   mutable multimutex_type         mutexes;
1727 };
1728 
1729 template<typename T,typename H,typename P,typename A>
1730 std::atomic<std::size_t> concurrent_table<T,H,P,A>::thread_counter={};
1731 
1732 #if defined(BOOST_MSVC)
1733 #pragma warning(pop) /* C4714 */
1734 #endif
1735 
1736 #include <boost/unordered/detail/foa/restore_wshadow.hpp>
1737 
1738 } /* namespace foa */
1739 } /* namespace detail */
1740 } /* namespace unordered */
1741 } /* namespace boost */
1742 
1743 #endif