File indexing completed on 2025-06-30 08:32:07
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
0013 #define BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
0014
0015 #include <atomic>
0016 #include <boost/assert.hpp>
0017 #include <boost/config.hpp>
0018 #include <boost/core/ignore_unused.hpp>
0019 #include <boost/core/no_exceptions_support.hpp>
0020 #include <boost/core/serialization.hpp>
0021 #include <boost/cstdint.hpp>
0022 #include <boost/mp11/tuple.hpp>
0023 #include <boost/throw_exception.hpp>
0024 #include <boost/unordered/detail/archive_constructed.hpp>
0025 #include <boost/unordered/detail/bad_archive_exception.hpp>
0026 #include <boost/unordered/detail/foa/core.hpp>
0027 #include <boost/unordered/detail/foa/reentrancy_check.hpp>
0028 #include <boost/unordered/detail/foa/rw_spinlock.hpp>
0029 #include <boost/unordered/detail/foa/tuple_rotate_right.hpp>
0030 #include <boost/unordered/detail/serialization_version.hpp>
0031 #include <boost/unordered/detail/static_assert.hpp>
0032 #include <boost/unordered/detail/type_traits.hpp>
0033 #include <cstddef>
0034 #include <functional>
0035 #include <initializer_list>
0036 #include <iterator>
0037 #include <memory>
0038 #include <new>
0039 #include <type_traits>
0040 #include <tuple>
0041 #include <utility>
0042
0043 #if !defined(BOOST_UNORDERED_DISABLE_PARALLEL_ALGORITHMS)
0044 #if defined(BOOST_UNORDERED_ENABLE_PARALLEL_ALGORITHMS)|| \
0045 !defined(BOOST_NO_CXX17_HDR_EXECUTION)
0046 #define BOOST_UNORDERED_PARALLEL_ALGORITHMS
0047 #endif
0048 #endif
0049
0050 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0051 #include <algorithm>
0052 #include <execution>
0053 #endif
0054
0055 namespace boost{
0056 namespace unordered{
0057 namespace detail{
0058
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)

/* Detects whether ExecutionPolicy (possibly cv/ref-qualified) is a standard
 * execution policy; used below to steer overload resolution of the
 * policy-taking visit_all/visit_while/erase_if overloads.
 */
template<typename ExecutionPolicy>
using is_execution_policy=std::is_execution_policy<
  typename std::remove_cv<
    typename std::remove_reference<ExecutionPolicy>::type
  >::type
>;

#else

/* Without <execution> support, no type qualifies as an execution policy. */
template<typename ExecutionPolicy>
using is_execution_policy=std::false_type;

#endif
0074
namespace foa{

/* Assumed CPU cache line size, used to pad/align concurrently accessed data
 * (mutex arrays, size counters) so they do not share cache lines.
 */
static constexpr std::size_t cacheline_size=64;
0078
0079 template<typename T,std::size_t N>
0080 class cache_aligned_array
0081 {
0082 public:
0083 cache_aligned_array(){for(std::size_t n=0;n<N;)::new (data(n++)) T();}
0084 ~cache_aligned_array(){for(auto n=N;n>0;)data(n--)->~T();}
0085 cache_aligned_array(const cache_aligned_array&)=delete;
0086 cache_aligned_array& operator=(const cache_aligned_array&)=delete;
0087
0088 T& operator[](std::size_t pos)noexcept{return *data(pos);}
0089
0090 private:
0091 static constexpr std::size_t element_offset=
0092 (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size;
0093
0094 BOOST_UNORDERED_STATIC_ASSERT(alignof(T)<=cacheline_size);
0095
0096 T* data(std::size_t pos)noexcept
0097 {
0098 return reinterpret_cast<T*>(
0099 (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/
0100 cacheline_size*cacheline_size
0101 +pos*element_offset);
0102 }
0103
0104 unsigned char buf[element_offset*N+cacheline_size-1];
0105 };
0106
0107 template<typename Mutex,std::size_t N>
0108 class multimutex
0109 {
0110 public:
0111 constexpr std::size_t size()const noexcept{return N;}
0112
0113 Mutex& operator[](std::size_t pos)noexcept
0114 {
0115 BOOST_ASSERT(pos<N);
0116 return mutexes[pos];
0117 }
0118
0119 void lock()noexcept{for(std::size_t n=0;n<N;)mutexes[n++].lock();}
0120 void unlock()noexcept{for(auto n=N;n>0;)mutexes[--n].unlock();}
0121
0122 private:
0123 cache_aligned_array<Mutex,N> mutexes;
0124 };
0125
0126
0127
0128 template<typename Mutex>
0129 class shared_lock
0130 {
0131 public:
0132 shared_lock(Mutex& m_)noexcept:m(m_){m.lock_shared();}
0133 ~shared_lock()noexcept{if(owns)m.unlock_shared();}
0134
0135
0136 shared_lock(const shared_lock&);
0137
0138 void lock(){BOOST_ASSERT(!owns);m.lock_shared();owns=true;}
0139 void unlock(){BOOST_ASSERT(owns);m.unlock_shared();owns=false;}
0140
0141 private:
0142 Mutex &m;
0143 bool owns=true;
0144 };
0145
0146
0147
0148
0149
/* Scoped exclusive ownership of a Mutex: lock() on construction, unlock()
 * on destruction. Non-movable.
 */
template<typename Mutex>
class lock_guard
{
public:
  lock_guard(Mutex& m_)noexcept:mtx{m_}
  {
    mtx.lock();
  }

  ~lock_guard()noexcept
  {
    mtx.unlock();
  }

  /* Declared but never defined: never actually copied; presumably visible
   * only so pre-C++17 compilers allow returning by value (RVO).
   */
  lock_guard(const lock_guard&);

private:
  Mutex& mtx;
};
0163
0164
0165
/* Locks two mutexes for the duration of the scope, always acquiring the
 * lower-addressed one first (std::less gives a total order over pointers)
 * so that concurrent scoped_bilocks over the same pair cannot deadlock.
 * When both references denote the same mutex it is locked exactly once.
 */
template<typename Mutex>
class scoped_bilock
{
public:
  scoped_bilock(Mutex& m1,Mutex& m2)noexcept
  {
    if(&m1==&m2){
      first_=&m1;
      second_=nullptr;
      first_->lock();
      return;
    }
    const bool m1_lower=std::less<Mutex*>{}(&m1,&m2);
    first_ =m1_lower?&m1:&m2;
    second_=m1_lower?&m2:&m1;
    first_->lock();
    second_->lock();
  }

  /* Declared but never defined: never actually copied; presumably visible
   * only so pre-C++17 compilers allow returning by value (RVO).
   */
  scoped_bilock(const scoped_bilock&);

  ~scoped_bilock()noexcept
  {
    if(second_)second_->unlock();
    first_->unlock();
  }

private:
  Mutex *first_,*second_;
};
0197
0198
0199
/* Wraps an integral in std::atomic while preserving plain-integral syntax
 * (implicit conversion, =, |=, &=). Every operation uses
 * memory_order_relaxed; any stronger ordering must come from the callers.
 */
template<typename Integral>
struct atomic_integral
{
  operator Integral()const
  {
    return n.load(std::memory_order_relaxed);
  }

  void operator=(Integral m)
  {
    n.store(m,std::memory_order_relaxed);
  }

  void operator|=(Integral m)
  {
    n.fetch_or(m,std::memory_order_relaxed);
  }

  void operator&=(Integral m)
  {
    n.fetch_and(m,std::memory_order_relaxed);
  }

  /* Copy assignment transfers the current value only; std::atomic itself
   * is not copyable.
   */
  atomic_integral& operator=(atomic_integral const& rhs)
  {
    n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
    return *this;
  }

  std::atomic<Integral> n;
};
0215
0216
0217
0218
0219
0220
/* Per-group synchronization data: an rw_spinlock granting shared (reader)
 * or exclusive (writer) access to one group's slots, plus an atomic insert
 * counter. NOTE(review): the counter is presumably bumped on insertion so
 * optimistic lookups can detect concurrent modification -- confirm against
 * the insertion code.
 */
struct group_access
{
  using mutex_type=rw_spinlock;
  using shared_lock_guard=shared_lock<mutex_type>;
  using exclusive_lock_guard=lock_guard<mutex_type>;
  using insert_counter_type=std::atomic<boost::uint32_t>;

  /* RAII shared (read) lock on this group. */
  shared_lock_guard shared_access(){return shared_lock_guard{m};}
  /* RAII exclusive (write) lock on this group. */
  exclusive_lock_guard exclusive_access(){return exclusive_lock_guard{m};}
  insert_counter_type& insert_counter(){return cnt;}

private:
  mutex_type m;
  insert_counter_type cnt{0};
};
0236
/* Statically allocated group_access array shared by all tables that hold no
 * dynamically allocated storage (see concurrent_table_arrays::
 * set_group_access), so locking code can run uniformly without null checks.
 * One instance exists per Size; function-local static initialization is
 * thread-safe.
 */
template<std::size_t Size>
group_access* dummy_group_accesses()
{
  static group_access accesses[Size];

  return accesses;
}
0250
0251
0252
/* Extends table_arrays with a parallel array of group_access objects, one
 * per group, supplying the per-group locks and insert counters used by
 * concurrent_table.
 */
template<typename Value,typename Group,typename SizePolicy,typename Allocator>
struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy,Allocator>
{
  using group_access_allocator_type=
    typename boost::allocator_rebind<Allocator,group_access>::type;
  using group_access_pointer=
    typename boost::allocator_pointer<group_access_allocator_type>::type;

  using super=table_arrays<Value,Group,SizePolicy,Allocator>;
  using allocator_type=typename super::allocator_type;

  concurrent_table_arrays(const super& arrays,group_access_pointer pga):
    super{arrays},group_accesses_{pga}{}

  group_access* group_accesses()const noexcept{
    return boost::to_address(group_accesses_);
  }

  /* Allocates the base arrays, then the group_access array on top; if the
   * latter throws, the base arrays are released before rethrowing.
   */
  static concurrent_table_arrays new_(allocator_type al,std::size_t n)
  {
    super x{super::new_(al,n)};
    BOOST_TRY{
      return new_group_access(group_access_allocator_type(al),x);
    }
    BOOST_CATCH(...){
      super::delete_(al,x);
      BOOST_RETHROW
    }
    BOOST_CATCH_END
  }

  /* Dispatches on whether group_access_pointer is a raw pointer: only then
   * can the statically allocated dummy array stand in for empty tables.
   */
  static void set_group_access(
    group_access_allocator_type al,concurrent_table_arrays& arrays)
  {
    set_group_access(
      al,arrays,std::is_same<group_access*,group_access_pointer>{});
  }

  /* General (fancy-pointer) case: allocate groups_size_mask+1 group_access
   * objects and default-construct each in place.
   */
  static void set_group_access(
    group_access_allocator_type al,
    concurrent_table_arrays& arrays,
    std::false_type )
  {
    arrays.group_accesses_=
      boost::allocator_allocate(al,arrays.groups_size_mask+1);

    for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
      ::new (arrays.group_accesses()+i) group_access();
    }
  }

  /* Raw-pointer case: tables without elements share the static dummy array
   * and perform no allocation at all.
   */
  static void set_group_access(
    group_access_allocator_type al,
    concurrent_table_arrays& arrays,
    std::true_type )
  {
    if(!arrays.elements()){
      arrays.group_accesses_=
        dummy_group_accesses<SizePolicy::min_size()>();
    } else {
      set_group_access(al,arrays,std::false_type{});
    }
  }

  /* Wraps pre-built base arrays x and attaches a group_access array. */
  static concurrent_table_arrays new_group_access(
    group_access_allocator_type al,const super& x)
  {
    concurrent_table_arrays arrays{x,nullptr};
    set_group_access(al,arrays);
    return arrays;
  }

  static void delete_(allocator_type al,concurrent_table_arrays& arrays)noexcept
  {
    delete_group_access(group_access_allocator_type(al),arrays);
    super::delete_(al,arrays);
  }

  /* No deallocation when elements()==nullptr: in that case group_accesses_
   * may point at the static dummy array (see set_group_access above).
   */
  static void delete_group_access(
    group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
  {
    if(arrays.elements()){
      boost::allocator_deallocate(
        al,arrays.group_accesses_,arrays.groups_size_mask+1);
    }
  }

  group_access_pointer group_accesses_;
};
0342
/* Atomic counterpart of the core's size-control block: maximum load (ml)
 * and element count (size). Each counter is preceded by padding so the two
 * live on different cache lines, preventing false sharing between threads
 * updating them concurrently.
 */
struct atomic_size_control
{
  static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>);
  /* strict <: the pad arrays below must have nonzero size */
  BOOST_UNORDERED_STATIC_ASSERT(atomic_size_t_size<cacheline_size);

  atomic_size_control(std::size_t ml_,std::size_t size_):
    pad0_{},ml{ml_},pad1_{},size{size_}{}
  atomic_size_control(const atomic_size_control& x):
    pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}

  /* pad + atomic together occupy exactly one cache line each */
  unsigned char pad0_[cacheline_size-atomic_size_t_size];
  std::atomic<std::size_t> ml;
  unsigned char pad1_[cacheline_size-atomic_size_t_size];
  std::atomic<std::size_t> size;
};
0360
0361
0362
/* Exchanges the values of two atomic counters. The exchange as a whole is
 * NOT atomic (four separate operations); callers are expected to hold
 * exclusive access to the owning structures.
 */
inline void
swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y)
{
  const std::size_t xv=x.load();
  const std::size_t yv=y.load();
  x.store(yv);
  y.store(xv);
}
0370
/* Member-wise swap of two size-control blocks. Not atomic as a whole;
 * presumably invoked only with exclusive access to both owning tables held
 * (see concurrent_table::swap).
 */
inline void swap(atomic_size_control& x,atomic_size_control& y)
{
  swap_atomic_size_t(x.ml,y.ml);
  swap_atomic_size_t(x.size,y.size);
}
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
/* Forward declaration of the non-concurrent table, from which
 * concurrent_table can be move-constructed (and which is declared a friend
 * below).
 */
template<typename,typename,typename,typename>
class table;

/* The shared FOA core instantiated for concurrent use: group15 metadata
 * stored in atomic_integral words, concurrent_table_arrays (per-group
 * locks/counters) and atomic_size_control (padded atomic counters).
 */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using concurrent_table_core_impl=table_core<
  TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
  atomic_size_control,Hash,Pred,Allocator>;
0439
0440 #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
0441
0442 #if defined(BOOST_MSVC)
0443 #pragma warning(push)
0444 #pragma warning(disable:4714)
0445 #endif
0446
/* Thread-safe hash table built on the shared FOA core. Synchronization is
 * two-level: an array of 128 padded spinlocks taken in shared mode by most
 * operations (and exclusively by whole-table operations), plus a per-group
 * rw_spinlock guarding element access (see the private section below).
 */
template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
class concurrent_table:
  concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>
{
  using super=concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>;
  using type_policy=typename super::type_policy;
  using group_type=typename super::group_type;
  using super::N;
  using prober=typename super::prober;
  using arrays_type=typename super::arrays_type;
  using size_ctrl_type=typename super::size_ctrl_type;
  /* Non-concurrent table with the same parameters; the converting
   * constructor below transfers its storage wholesale.
   */
  using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
  friend compatible_nonconcurrent_table;

public:
  using key_type=typename super::key_type;
  using init_type=typename super::init_type;
  using value_type=typename super::value_type;
  using element_type=typename super::element_type;
  using hasher=typename super::hasher;
  using key_equal=typename super::key_equal;
  using allocator_type=typename super::allocator_type;
  using size_type=typename super::size_type;
  /* Chunk size for the iterator-range (bulk) visit() overloads. */
  static constexpr std::size_t bulk_visit_size=16;

#if defined(BOOST_UNORDERED_ENABLE_STATS)
  using stats=typename super::stats;
#endif

private:
  /* Enables an overload only when Value is value_type AND value_type is
   * distinct from init_type (i.e. for map-like containers).
   */
  template<typename Value,typename T>
  using enable_if_is_value_type=typename std::enable_if<
    !std::is_same<init_type,value_type>::value&&
    std::is_same<Value,value_type>::value,
    T
  >::type;

public:
  concurrent_table(
    std::size_t n=default_bucket_count,const Hash& h_=Hash(),
    const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
    super{n,h_,pred_,al_}
  {}

  /* Copy/move construction delegates to private overloads whose guard
   * parameter keeps x exclusively locked while the base is constructed.
   */
  concurrent_table(const concurrent_table& x):
    concurrent_table(x,x.exclusive_access()){}
  concurrent_table(concurrent_table&& x):
    concurrent_table(std::move(x),x.exclusive_access()){}
  concurrent_table(const concurrent_table& x,const Allocator& al_):
    concurrent_table(x,al_,x.exclusive_access()){}
  concurrent_table(concurrent_table&& x,const Allocator& al_):
    concurrent_table(std::move(x),al_,x.exclusive_access()){}
0499
  /* Converting constructor from the non-concurrent table: moves x's hash,
   * predicate and allocator, reuses x's bucket arrays (building the
   * per-group access array on top of them), then installs the empty arrays
   * held by ah into x and resets its size control, leaving x empty.
   */
  template<typename ArraysType>
  concurrent_table(
    compatible_nonconcurrent_table&& x,
    arrays_holder<ArraysType,Allocator>&& ah):
    super{
      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
      [&x]{return arrays_type::new_group_access(
        x.al(),typename arrays_type::super{
          x.arrays.groups_size_index,x.arrays.groups_size_mask,
          to_pointer<typename arrays_type::group_type_pointer>(
            reinterpret_cast<group_type*>(x.arrays.groups())),
          x.arrays.elements_});},
      size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
  {
    x.arrays=ah.release();
    x.size_ctrl.ml=x.initial_max_load();
    x.size_ctrl.size=0;
    BOOST_UNORDERED_SWAP_STATS(this->cstats,x.cstats);
  }

  concurrent_table(compatible_nonconcurrent_table&& x):
    concurrent_table(std::move(x),x.make_empty_arrays())
  {}

  ~concurrent_table()=default;
0525
  concurrent_table& operator=(const concurrent_table& x)
  {
    /* exclusive access to both tables for the duration of the copy */
    auto lck=exclusive_access(*this,x);
    super::operator=(x);
    return *this;
  }

  concurrent_table& operator=(concurrent_table&& x)noexcept(
    noexcept(std::declval<super&>() = std::declval<super&&>()))
  {
    auto lck=exclusive_access(*this,x);
    super::operator=(std::move(x));
    return *this;
  }

  concurrent_table& operator=(std::initializer_list<value_type> il) {
    auto lck=exclusive_access();
    super::clear();
    super::noshrink_reserve(il.size());
    for (auto const& v : il) {
      /* the table-wide exclusive lock is held, so the unprotected
       * (lock-free) emplace variant is safe here
       */
      this->unprotected_emplace(v);
    }
    return *this;
  }

  allocator_type get_allocator()const noexcept
  {
    auto lck=shared_access();
    return super::get_allocator();
  }
0556
  /* visit(x,f): invokes f on the element matching x, if present, and
   * returns the number of elements visited. The non-const overload locks
   * the group exclusively and may pass a mutable reference (see cast_for);
   * const/cvisit use shared group locking and pass const references.
   */
  template<typename Key,typename F>
  BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)
  {
    return visit_impl(group_exclusive{},x,std::forward<F>(f));
  }

  template<typename Key,typename F>
  BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)const
  {
    return visit_impl(group_shared{},x,std::forward<F>(f));
  }

  template<typename Key,typename F>
  BOOST_FORCEINLINE std::size_t cvisit(const Key& x,F&& f)const
  {
    return visit(x,std::forward<F>(f));
  }

  /* Bulk visitation: looks up all keys in [first,last) in chunks of
   * (roughly) bulk_visit_size and returns the total number of elements
   * visited.
   */
  template<typename FwdIterator,typename F>
  BOOST_FORCEINLINE
  std::size_t visit(FwdIterator first,FwdIterator last,F&& f)
  {
    return bulk_visit_impl(group_exclusive{},first,last,std::forward<F>(f));
  }

  template<typename FwdIterator,typename F>
  BOOST_FORCEINLINE
  std::size_t visit(FwdIterator first,FwdIterator last,F&& f)const
  {
    return bulk_visit_impl(group_shared{},first,last,std::forward<F>(f));
  }

  template<typename FwdIterator,typename F>
  BOOST_FORCEINLINE
  std::size_t cvisit(FwdIterator first,FwdIterator last,F&& f)const
  {
    return visit(first,last,std::forward<F>(f));
  }

  /* visit_all(f): invokes f on every element; returns the visit count. */
  template<typename F> std::size_t visit_all(F&& f)
  {
    return visit_all_impl(group_exclusive{},std::forward<F>(f));
  }

  template<typename F> std::size_t visit_all(F&& f)const
  {
    return visit_all_impl(group_shared{},std::forward<F>(f));
  }

  template<typename F> std::size_t cvisit_all(F&& f)const
  {
    return visit_all(std::forward<F>(f));
  }
0610
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Execution-policy overloads of visit_all. Unlike the serial versions,
   * these return void rather than a visit count.
   */
  template<typename ExecutionPolicy,typename F>
  void visit_all(ExecutionPolicy&& policy,F&& f)
  {
    visit_all_impl(
      group_exclusive{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  void visit_all(ExecutionPolicy&& policy,F&& f)const
  {
    visit_all_impl(
      group_shared{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  void cvisit_all(ExecutionPolicy&& policy,F&& f)const
  {
    visit_all(std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }
#endif
0634
  /* visit_while(f): visits elements, stopping early when f returns false;
   * the overall result is f's conjunction (semantics delegated to
   * for_all_elements_while in the core).
   */
  template<typename F> bool visit_while(F&& f)
  {
    return visit_while_impl(group_exclusive{},std::forward<F>(f));
  }

  template<typename F> bool visit_while(F&& f)const
  {
    return visit_while_impl(group_shared{},std::forward<F>(f));
  }

  template<typename F> bool cvisit_while(F&& f)const
  {
    return visit_while(std::forward<F>(f));
  }
0649
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Execution-policy overloads of visit_while. */
  template<typename ExecutionPolicy,typename F>
  bool visit_while(ExecutionPolicy&& policy,F&& f)
  {
    return visit_while_impl(
      group_exclusive{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  bool visit_while(ExecutionPolicy&& policy,F&& f)const
  {
    return visit_while_impl(
      group_shared{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
  {
    return visit_while(
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }
#endif
0674
  bool empty()const noexcept{return size()==0;}

  std::size_t size()const noexcept
  {
    /* shared table lock so whole-table operations (rehash/swap/assignment)
     * cannot interleave while the count is read
     */
    auto lck=shared_access();
    return unprotected_size();
  }

  using super::max_size;
0684
  /* emplace(args...): constructs a value from args and inserts it; returns
   * true iff an insertion took place.
   */
  template<typename... Args>
  BOOST_FORCEINLINE bool emplace(Args&&... args)
  {
    return construct_and_emplace(std::forward<Args>(args)...);
  }

  /* Fast path: a single argument already similar to value_type/init_type
   * is forwarded directly, skipping intermediate construction.
   */
  template<typename Value>
  BOOST_FORCEINLINE auto emplace(Value&& x)->typename std::enable_if<
    detail::is_similar_to_any<Value,value_type,init_type>::value,bool>::type
  {
    return emplace_impl(std::forward<Value>(x));
  }

  /* (key,value) fast path: builds (or forwards) the key via
   * alloc_cted_or_fwded_key_type and routes through the try_emplace
   * machinery.
   */
  template <typename K, typename V>
  BOOST_FORCEINLINE auto emplace(K&& k, V&& v) ->
    typename std::enable_if<is_emplace_kv_able<concurrent_table, K>::value,
      bool>::type
  {
    alloc_cted_or_fwded_key_type<type_policy, Allocator, K&&> x(
      this->al(), std::forward<K>(k));
    return emplace_impl(
      try_emplace_args_t{}, x.move_or_fwd(), std::forward<V>(v));
  }

  BOOST_FORCEINLINE bool
  insert(const init_type& x){return emplace_impl(x);}

  BOOST_FORCEINLINE bool
  insert(init_type&& x){return emplace_impl(std::move(x));}

  /* The dummy template parameter prevents these from clashing with the
   * init_type overloads above when value_type and init_type coincide.
   */
  template<typename=void>
  BOOST_FORCEINLINE bool
  insert(const value_type& x){return emplace_impl(x);}

  template<typename=void>
  BOOST_FORCEINLINE bool
  insert(value_type&& x){return emplace_impl(std::move(x));}

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace(Key&& x,Args&&... args)
  {
    return emplace_impl(
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  /* try_emplace_or_[c]visit(k,args...,f): inserts when k is absent,
   * otherwise visits the existing element with f, which is passed as the
   * LAST argument ("flast"); the cvisit form uses shared group access.
   */
  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace_or_visit(Key&& x,Args&&... args)
  {
    return emplace_or_visit_flast(
      group_exclusive{},
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace_or_cvisit(Key&& x,Args&&... args)
  {
    return emplace_or_visit_flast(
      group_shared{},
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_or_visit(Args&&... args)
  {
    return construct_and_emplace_or_visit_flast(
      group_exclusive{},std::forward<Args>(args)...);
  }

  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_or_cvisit(Args&&... args)
  {
    return construct_and_emplace_or_visit_flast(
      group_shared{},std::forward<Args>(args)...);
  }
0763
  /* insert_or_[c]visit(x,f): inserts x when its key is absent, otherwise
   * invokes f on the existing element (under exclusive/shared group
   * access respectively).
   */
  template<typename F>
  BOOST_FORCEINLINE bool insert_or_visit(const init_type& x,F&& f)
  {
    return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
  }

  template<typename F>
  BOOST_FORCEINLINE bool insert_or_cvisit(const init_type& x,F&& f)
  {
    return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
  }

  template<typename F>
  BOOST_FORCEINLINE bool insert_or_visit(init_type&& x,F&& f)
  {
    return emplace_or_visit_impl(
      group_exclusive{},std::forward<F>(f),std::move(x));
  }

  template<typename F>
  BOOST_FORCEINLINE bool insert_or_cvisit(init_type&& x,F&& f)
  {
    return emplace_or_visit_impl(
      group_shared{},std::forward<F>(f),std::move(x));
  }

  /* value_type overloads, enabled only when value_type differs from
   * init_type (map-like containers).
   */
  template<typename Value,typename F>
  BOOST_FORCEINLINE auto insert_or_visit(const Value& x,F&& f)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
  }

  template<typename Value,typename F>
  BOOST_FORCEINLINE auto insert_or_cvisit(const Value& x,F&& f)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
  }

  template<typename Value,typename F>
  BOOST_FORCEINLINE auto insert_or_visit(Value&& x,F&& f)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_or_visit_impl(
      group_exclusive{},std::forward<F>(f),std::move(x));
  }

  template<typename Value,typename F>
  BOOST_FORCEINLINE auto insert_or_cvisit(Value&& x,F&& f)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_or_visit_impl(
      group_shared{},std::forward<F>(f),std::move(x));
  }

  /* erase(x): unconditional erase_if; returns the number erased (0 or 1). */
  template<typename Key>
  BOOST_FORCEINLINE std::size_t erase(const Key& x)
  {
    return erase_if(x,[](const value_type&){return true;});
  }
0827
  /* erase_if(x,f): erases the element matching x if f approves it; returns
   * the number erased (0 or 1). Takes the table lock shared and the target
   * group exclusively. The SFINAE guard keeps this overload from capturing
   * the (policy,f) form below.
   */
  template<typename Key,typename F>
  BOOST_FORCEINLINE auto erase_if(const Key& x,F&& f)->typename std::enable_if<
    !is_execution_policy<Key>::value,std::size_t>::type
  {
    auto lck=shared_access();
    auto hash=this->hash_for(x);
    std::size_t res=0;
    unprotected_internal_visit(
      group_exclusive{},x,this->position_for(hash),hash,
      [&,this](group_type* pg,unsigned int n,element_type* p)
      {
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          super::erase(pg,n,p);
          res=1;
        }
      });
    return res;
  }

  /* erase_if(f): erases every element f approves; returns how many. */
  template<typename F>
  std::size_t erase_if(F&& f)
  {
    auto lck=shared_access();
    std::size_t res=0;
    for_all_elements(
      group_exclusive{},
      [&,this](group_type* pg,unsigned int n,element_type* p){
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          super::erase(pg,n,p);
          ++res;
        }
      });
    return res;
  }

#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Parallel erase_if: traverses with the given policy; returns void (no
   * count is accumulated across workers).
   */
  template<typename ExecutionPolicy,typename F>
  auto erase_if(ExecutionPolicy&& policy,F&& f)->typename std::enable_if<
    is_execution_policy<ExecutionPolicy>::value,void>::type
  {
    auto lck=shared_access();
    for_all_elements(
      group_exclusive{},std::forward<ExecutionPolicy>(policy),
      [&,this](group_type* pg,unsigned int n,element_type* p){
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          super::erase(pg,n,p);
        }
      });
  }
#endif
0878
  void swap(concurrent_table& x)
    noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  {
    /* exclusive access to both tables (address-ordered, deadlock-free) */
    auto lck=exclusive_access(*this,x);
    super::swap(x);
  }

  void clear()noexcept
  {
    auto lck=exclusive_access();
    super::clear();
  }

  /* merge(x): moves into *this every element of x whose key is not already
   * present; such elements are erased from x (erase_on_exit rolls back the
   * erasure when insertion fails). Returns the number transferred. Both
   * tables stay exclusively locked throughout.
   */
  template<typename Hash2,typename Pred2>
  size_type merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& x)
  {
    using merge_table_type=concurrent_table<TypePolicy,Hash2,Pred2,Allocator>;
    using super2=typename merge_table_type::super;

    /* super2 only appears in the qualified call below; keep some compilers
     * from flagging it unused
     */
    boost::ignore_unused<super2>();

    auto lck=exclusive_access(*this,x);
    size_type s=super::size();
    x.super2::for_all_elements( /* unprotected: both tables locked */
      [&,this](group_type* pg,unsigned int n,element_type* p){
        typename merge_table_type::erase_on_exit e{x,pg,n,p};
        if(!unprotected_emplace(type_policy::move(*p)))e.rollback();
      });
    return size_type{super::size()-s};
  }

  template<typename Hash2,typename Pred2>
  void merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}

  hasher hash_function()const
  {
    auto lck=shared_access();
    return super::hash_function();
  }

  key_equal key_eq()const
  {
    auto lck=shared_access();
    return super::key_eq();
  }

  /* count is 0 or 1: the table holds unique keys. */
  template<typename Key>
  BOOST_FORCEINLINE std::size_t count(Key&& x)const
  {
    return (std::size_t)contains(std::forward<Key>(x));
  }

  /* contains: a visit with a no-op callback, testing for presence only. */
  template<typename Key>
  BOOST_FORCEINLINE bool contains(Key&& x)const
  {
    return visit(std::forward<Key>(x),[](const value_type&){})!=0;
  }
0938
  std::size_t capacity()const noexcept
  {
    auto lck=shared_access();
    return super::capacity();
  }

  /* load_factor: size/capacity, defined as 0 for a zero-capacity table. */
  float load_factor()const noexcept
  {
    auto lck=shared_access();
    if(super::capacity()==0)return 0;
    else return float(unprotected_size())/
      float(super::capacity());
  }

  using super::max_load_factor;

  std::size_t max_load()const noexcept
  {
    auto lck=shared_access();
    return super::max_load();
  }

  /* rehash/reserve restructure the whole table: exclusive access. */
  void rehash(std::size_t n)
  {
    auto lck=exclusive_access();
    super::rehash(n);
  }

  void reserve(std::size_t n)
  {
    auto lck=exclusive_access();
    super::reserve(n);
  }

#if defined(BOOST_UNORDERED_ENABLE_STATS)
  /* Stats interface exposed unlocked, as inherited from the core. */
  using super::get_stats;
  using super::reset_stats;
#endif

  /* Free-function erase_if, found by ADL (std::erase_if-style). */
  template<typename Predicate>
  friend std::size_t erase_if(concurrent_table& x,Predicate&& pr)
  {
    return x.erase_if(std::forward<Predicate>(pr));
  }

  friend bool operator==(const concurrent_table& x,const concurrent_table& y)
  {
    /* exclusive access to both so the comparison sees a consistent pair */
    auto lck=exclusive_access(x,y);
    return static_cast<const super&>(x)==static_cast<const super&>(y);
  }

  friend bool operator!=(const concurrent_table& x,const concurrent_table& y)
  {
    return !(x==y);
  }
0996
private:
  template<typename,typename,typename,typename> friend class concurrent_table;

  /* Table-wide locking: 128 cache-line-padded spinlocks. Shared access
   * takes just one of them (chosen per thread), so readers scale across
   * cores; exclusive access takes all 128 in ascending order. Guards are
   * wrapped in reentrancy checkers to trap recursive locking in debug.
   */
  using mutex_type=rw_spinlock;
  using multimutex_type=multimutex<mutex_type,128>;
  using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>;
  using exclusive_lock_guard=reentrancy_checked<lock_guard<multimutex_type>>;
  using exclusive_bilock_guard=
    reentrancy_bichecked<scoped_bilock<multimutex_type>>;
  using group_shared_lock_guard=typename group_access::shared_lock_guard;
  using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard;
  using group_insert_counter_type=typename group_access::insert_counter_type;

  /* Targets of the public copy/move constructors: the by-value guard
   * parameter keeps x exclusively locked while the base is constructed.
   */
  concurrent_table(const concurrent_table& x,exclusive_lock_guard):
    super{x}{}
  concurrent_table(concurrent_table&& x,exclusive_lock_guard):
    super{std::move(x)}{}
  concurrent_table(
    const concurrent_table& x,const Allocator& al_,exclusive_lock_guard):
    super{x,al_}{}
  concurrent_table(
    concurrent_table&& x,const Allocator& al_,exclusive_lock_guard):
    super{std::move(x),al_}{}

  inline shared_lock_guard shared_access()const
  {
    /* Each thread latches onto one spinlock, assigned round-robin on first
     * use. NOTE(review): thread_counter is presumably a static atomic
     * member declared further down the class -- outside this excerpt.
     */
    thread_local auto id=(++thread_counter)%mutexes.size();

    return shared_lock_guard{this,mutexes[id]};
  }

  inline exclusive_lock_guard exclusive_access()const
  {
    return exclusive_lock_guard{this,mutexes};
  }

  /* Exclusive access to two tables at once; scoped_bilock orders the
   * acquisitions by address to avoid deadlock.
   */
  static inline exclusive_bilock_guard exclusive_access(
    const concurrent_table& x,const concurrent_table& y)
  {
    return {&x,&y,x.mutexes,y.mutexes};
  }

  template<typename Hash2,typename Pred2>
  static inline exclusive_bilock_guard exclusive_access(
    const concurrent_table& x,
    const concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& y)
  {
    return {&x,&y,x.mutexes,y.mutexes};
  }
1046
1047
1048
  /* Tag types selecting per-group locking mode: group_shared for read-only
   * visitation, group_exclusive for potentially mutating access.
   */
  using group_shared=std::false_type;
  using group_exclusive=std::true_type;

  inline group_shared_lock_guard access(group_shared,std::size_t pos)const
  {
    return this->arrays.group_accesses()[pos].shared_access();
  }

  inline group_exclusive_lock_guard access(
    group_exclusive,std::size_t pos)const
  {
    return this->arrays.group_accesses()[pos].exclusive_access();
  }

  inline group_insert_counter_type& insert_counter(std::size_t pos)const
  {
    return this->arrays.group_accesses()[pos].insert_counter();
  }

  /* cast_for yields the reference type handed to user callbacks: shared
   * access is always const; exclusive access is mutable except when
   * key_type==value_type (set-like containers), whose elements must stay
   * immutable to protect the key.
   */
  static inline const value_type&
  cast_for(group_shared,value_type& x){return x;}

  static inline typename std::conditional<
    std::is_same<key_type,value_type>::value,
    const value_type&,
    value_type&
  >::type
  cast_for(group_exclusive,value_type& x){return x;}

  /* RAII helper used by merge(): erases (pg,pos,p) from x on scope exit
   * unless rollback() was called (i.e. unless the element failed to
   * transfer and must stay in x).
   */
  struct erase_on_exit
  {
    erase_on_exit(
      concurrent_table& x_,
      group_type* pg_,unsigned int pos_,element_type* p_):
      x(x_),pg(pg_),pos(pos_),p(p_){}
    ~erase_on_exit(){if(!rollback_)x.super::erase(pg,pos,p);}

    void rollback(){rollback_=true;}

    concurrent_table &x;
    group_type       *pg;
    unsigned int      pos;
    element_type     *p;
    bool              rollback_=false;
  };
1099
1100 template<typename GroupAccessMode,typename Key,typename F>
1101 BOOST_FORCEINLINE std::size_t visit_impl(
1102 GroupAccessMode access_mode,const Key& x,F&& f)const
1103 {
1104 auto lck=shared_access();
1105 auto hash=this->hash_for(x);
1106 return unprotected_visit(
1107 access_mode,x,this->position_for(hash),hash,std::forward<F>(f));
1108 }
1109
1110 template<typename GroupAccessMode,typename FwdIterator,typename F>
1111 BOOST_FORCEINLINE
1112 std::size_t bulk_visit_impl(
1113 GroupAccessMode access_mode,FwdIterator first,FwdIterator last,F&& f)const
1114 {
1115 auto lck=shared_access();
1116 std::size_t res=0;
1117 auto n=static_cast<std::size_t>(std::distance(first,last));
1118 while(n){
1119 auto m=n<2*bulk_visit_size?n:bulk_visit_size;
1120 res+=unprotected_bulk_visit(access_mode,first,m,std::forward<F>(f));
1121 n-=m;
1122 std::advance(
1123 first,
1124 static_cast<
1125 typename std::iterator_traits<FwdIterator>::difference_type>(m));
1126 }
1127 return res;
1128 }
1129
1130 template<typename GroupAccessMode,typename F>
1131 std::size_t visit_all_impl(GroupAccessMode access_mode,F&& f)const
1132 {
1133 auto lck=shared_access();
1134 std::size_t res=0;
1135 for_all_elements(access_mode,[&](element_type* p){
1136 f(cast_for(access_mode,type_policy::value_from(*p)));
1137 ++res;
1138 });
1139 return res;
1140 }
1141
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Parallel counterpart of visit_all_impl: traverses elements with the
   * supplied execution policy. No visitation count is returned.
   */
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  void visit_all_impl(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
  {
    auto lck=shared_access();
    for_all_elements(
      access_mode,std::forward<ExecutionPolicy>(policy),
      [&](element_type* p){
        f(cast_for(access_mode,type_policy::value_from(*p)));
      });
  }
#endif
1155
1156 template<typename GroupAccessMode,typename F>
1157 bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
1158 {
1159 auto lck=shared_access();
1160 return for_all_elements_while(access_mode,[&](element_type* p){
1161 return f(cast_for(access_mode,type_policy::value_from(*p)));
1162 });
1163 }
1164
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Parallel counterpart of visit_while_impl: returns true iff f returned
   * true for every element visited (traversal order unspecified under a
   * parallel policy).
   */
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  bool visit_while_impl(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
  {
    auto lck=shared_access();
    return for_all_elements_while(
      access_mode,std::forward<ExecutionPolicy>(policy),
      [&](element_type* p){
        return f(cast_for(access_mode,type_policy::value_from(*p)));
      });
  }
#endif
1178
  /* Finds x (probing from pos0) and passes the matching element's value to
   * f, const-qualified or not according to access_mode via cast_for.
   * Returns 1 if an element was visited, 0 otherwise. "Unprotected" =
   * caller must already hold shared/exclusive table access.
   */
  template<typename GroupAccessMode,typename Key,typename F>
  BOOST_FORCEINLINE std::size_t unprotected_visit(
    GroupAccessMode access_mode,
    const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
  {
    return unprotected_internal_visit(
      access_mode,x,pos0,hash,
      [&](group_type*,unsigned int,element_type* p)
      {f(cast_for(access_mode,type_policy::value_from(*p)));});
  }
1189
1190 #if defined(BOOST_MSVC)
1191
1192 #pragma warning(push)
1193 #pragma warning(disable:4800)
1194 #endif
1195
  /* Core lookup: probes groups starting at pos0 looking for an element with
   * key equal to x. On a SIMD match, the group is locked (shared or
   * exclusive per access_mode), each candidate slot is re-checked for
   * occupancy (it may have been erased after match()) and then compared;
   * on success f(pg,n,p+n) is called and 1 returned. Probing stops at the
   * first group not overflowed for this hash; returns 0 when not found.
   */
  template<typename GroupAccessMode,typename Key,typename F>
  BOOST_FORCEINLINE std::size_t unprotected_internal_visit(
    GroupAccessMode access_mode,
    const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
  {
    BOOST_UNORDERED_STATS_COUNTER(num_cmps);
    prober pb(pos0);
    do{
      auto pos=pb.get();
      auto pg=this->arrays.groups()+pos;
      auto mask=pg->match(hash);
      if(mask){
        auto p=this->arrays.elements()+pos*N;
        BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
        auto lck=access(access_mode,pos);
        do{
          auto n=unchecked_countr_zero(mask);
          /* slot may have been emptied between match() and locking */
          if(BOOST_LIKELY(pg->is_occupied(n))){
            BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
            if(BOOST_LIKELY(bool(this->pred()(x,this->key_from(p[n]))))){
              f(pg,n,p+n);
              BOOST_UNORDERED_ADD_STATS(
                this->cstats.successful_lookup,(pb.length(),num_cmps));
              return 1;
            }
          }
          mask&=mask-1; /* clear lowest set bit: next candidate slot */
        }while(mask);
      }
      /* no overflow marker for this hash => key cannot be further along
       * the probe sequence */
      if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
        BOOST_UNORDERED_ADD_STATS(
          this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
        return 0;
      }
    }
    while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask)));
    BOOST_UNORDERED_ADD_STATS(
      this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
    return 0;
  }
1236
1237 template<typename GroupAccessMode,typename FwdIterator,typename F>
1238 BOOST_FORCEINLINE std::size_t unprotected_bulk_visit(
1239 GroupAccessMode access_mode,FwdIterator first,std::size_t m,F&& f)const
1240 {
1241 BOOST_ASSERT(m<2*bulk_visit_size);
1242
1243 std::size_t res=0,
1244 hashes[2*bulk_visit_size-1],
1245 positions[2*bulk_visit_size-1];
1246 int masks[2*bulk_visit_size-1];
1247 auto it=first;
1248
1249 for(auto i=m;i--;++it){
1250 auto hash=hashes[i]=this->hash_for(*it);
1251 auto pos=positions[i]=this->position_for(hash);
1252 BOOST_UNORDERED_PREFETCH(this->arrays.groups()+pos);
1253 }
1254
1255 for(auto i=m;i--;){
1256 auto hash=hashes[i];
1257 auto pos=positions[i];
1258 auto mask=masks[i]=(this->arrays.groups()+pos)->match(hash);
1259 if(mask){
1260 BOOST_UNORDERED_PREFETCH(this->arrays.group_accesses()+pos);
1261 BOOST_UNORDERED_PREFETCH(
1262 this->arrays.elements()+pos*N+unchecked_countr_zero(mask));
1263 }
1264 }
1265
1266 it=first;
1267 for(auto i=m;i--;++it){
1268 BOOST_UNORDERED_STATS_COUNTER(num_cmps);
1269 auto pos=positions[i];
1270 prober pb(pos);
1271 auto pg=this->arrays.groups()+pos;
1272 auto mask=masks[i];
1273 element_type *p;
1274 if(!mask)goto post_mask;
1275 p=this->arrays.elements()+pos*N;
1276 for(;;){
1277 {
1278 auto lck=access(access_mode,pos);
1279 do{
1280 auto n=unchecked_countr_zero(mask);
1281 if(BOOST_LIKELY(pg->is_occupied(n))){
1282 BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
1283 if(bool(this->pred()(*it,this->key_from(p[n])))){
1284 f(cast_for(access_mode,type_policy::value_from(p[n])));
1285 ++res;
1286 BOOST_UNORDERED_ADD_STATS(
1287 this->cstats.successful_lookup,(pb.length(),num_cmps));
1288 goto next_key;
1289 }
1290 }
1291 mask&=mask-1;
1292 }while(mask);
1293 }
1294 post_mask:
1295 do{
1296 if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))||
1297 BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){
1298 BOOST_UNORDERED_ADD_STATS(
1299 this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
1300 goto next_key;
1301 }
1302 pos=pb.get();
1303 pg=this->arrays.groups()+pos;
1304 mask=pg->match(hashes[i]);
1305 }while(!mask);
1306 p=this->arrays.elements()+pos*N;
1307 BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
1308 }
1309 next_key:;
1310 }
1311 return res;
1312 }
1313
1314 #if defined(BOOST_MSVC)
1315 #pragma warning(pop)
1316 #endif
1317
1318 std::size_t unprotected_size()const
1319 {
1320 std::size_t m=this->size_ctrl.ml;
1321 std::size_t s=this->size_ctrl.size;
1322 return s<=m?s:m;
1323 }
1324
  /* Plain insert (construct value from args, do nothing if the key already
   * exists): implemented as insert-or-visit with a no-op visitor.
   */
  template<typename... Args>
  BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args)
  {
    return construct_and_emplace_or_visit(
      group_shared{},[](const value_type&){},std::forward<Args>(args)...);
  }
1331
  /* Callable shim so mp11::tuple_apply can dispatch a (this, rotated args)
   * tuple back into construct_and_emplace_or_visit.
   */
  struct call_construct_and_emplace_or_visit
  {
    template<typename... Args>
    BOOST_FORCEINLINE bool operator()(
      concurrent_table* this_,Args&&... args)const
    {
      return this_->construct_and_emplace_or_visit(
        std::forward<Args>(args)...);
    }
  };
1342
  /* Variant taking the visitation function as the LAST argument ("flast"):
   * rotates the argument tuple right so the function comes first, then
   * forwards to construct_and_emplace_or_visit.
   */
  template<typename GroupAccessMode,typename... Args>
  BOOST_FORCEINLINE bool construct_and_emplace_or_visit_flast(
    GroupAccessMode access_mode,Args&&... args)
  {
    return mp11::tuple_apply(
      call_construct_and_emplace_or_visit{},
      std::tuple_cat(
        std::make_tuple(this,access_mode),
        tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
      )
    );
  }
1355
  /* Constructs the value from args up front (outside any group lock), then
   * either inserts it or, if an equivalent key exists, visits the existing
   * element with f. Returns true on insertion, false on visitation. A
   * negative result from the norehash path means the table is at max load:
   * the shared lock is dropped, the table rehashed, and the slower
   * non-inlined path retried with the already-constructed value.
   */
  template<typename GroupAccessMode,typename F,typename... Args>
  BOOST_FORCEINLINE bool construct_and_emplace_or_visit(
    GroupAccessMode access_mode,F&& f,Args&&... args)
  {
    auto lck=shared_access();

    alloc_cted_insert_type<type_policy,Allocator,Args...> x(
      this->al(),std::forward<Args>(args)...);
    int res=unprotected_norehash_emplace_or_visit(
      access_mode,std::forward<F>(f),type_policy::move(x.value()));
    if(BOOST_LIKELY(res>=0))return res!=0;

    lck.unlock();

    rehash_if_full();
    return noinline_emplace_or_visit(
      access_mode,std::forward<F>(f),type_policy::move(x.value()));
  }
1374
  /* Emplace from perfectly-forwarded args, ignoring an existing equivalent
   * element (no-op visitor). Returns true iff a new element was inserted.
   */
  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_impl(Args&&... args)
  {
    return emplace_or_visit_impl(
      group_shared{},[](const value_type&){},std::forward<Args>(args)...);
  }
1381
  /* Out-of-line trampoline to emplace_or_visit_impl; BOOST_NOINLINE keeps
   * this retry/slow path from bloating callers' inlined fast paths.
   */
  template<typename GroupAccessMode,typename F,typename... Args>
  BOOST_NOINLINE bool noinline_emplace_or_visit(
    GroupAccessMode access_mode,F&& f,Args&&... args)
  {
    return emplace_or_visit_impl(
      access_mode,std::forward<F>(f),std::forward<Args>(args)...);
  }
1389
  /* Callable shim so mp11::tuple_apply can dispatch a (this, rotated args)
   * tuple back into emplace_or_visit_impl.
   */
  struct call_emplace_or_visit_impl
  {
    template<typename... Args>
    BOOST_FORCEINLINE bool operator()(
      concurrent_table* this_,Args&&... args)const
    {
      return this_->emplace_or_visit_impl(std::forward<Args>(args)...);
    }
  };
1399
  /* Variant taking the visitation function as the LAST argument ("flast"):
   * rotates the argument tuple right so the function comes first, then
   * forwards to emplace_or_visit_impl.
   */
  template<typename GroupAccessMode,typename... Args>
  BOOST_FORCEINLINE bool emplace_or_visit_flast(
    GroupAccessMode access_mode,Args&&... args)
  {
    return mp11::tuple_apply(
      call_emplace_or_visit_impl{},
      std::tuple_cat(
        std::make_tuple(this,access_mode),
        tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
      )
    );
  }
1412
  /* Insert-or-visit retry loop: attempts under shared access; when the
   * table reports max load (negative result) the shared lock is released
   * (scope exit) and the table rehashed under exclusive access before
   * retrying. Returns true if inserted, false if an element was visited.
   */
  template<typename GroupAccessMode,typename F,typename... Args>
  BOOST_FORCEINLINE bool emplace_or_visit_impl(
    GroupAccessMode access_mode,F&& f,Args&&... args)
  {
    for(;;){
      {
        auto lck=shared_access();
        int res=unprotected_norehash_emplace_or_visit(
          access_mode,std::forward<F>(f),std::forward<Args>(args)...);
        if(BOOST_LIKELY(res>=0))return res!=0;
      }
      rehash_if_full();
    }
  }
1427
  /* Non-concurrent emplace: no group locking or insert-counter handshaking
   * is done here, so the caller is responsible for synchronization
   * (presumably holds exclusive access — confirm at call sites). Returns
   * false if an equivalent key is already present, true after inserting
   * (rehashing first when at max load).
   */
  template<typename... Args>
  BOOST_FORCEINLINE bool unprotected_emplace(Args&&... args)
  {
    const auto &k=this->key_from(std::forward<Args>(args)...);
    auto hash=this->hash_for(k);
    auto pos0=this->position_for(hash);

    if(this->find(k,pos0,hash))return false;

    if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
      this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...);
    }
    else{
      this->unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...);
    }
    return true;
  }
1445
  /* RAII size reservation: optimistically increments size_ctrl.size on
   * construction and rolls the increment back on destruction unless
   * commit() was called. succeeded() reports whether the incremented size
   * is still within max load, i.e. whether the insertion may proceed.
   */
  struct reserve_size
  {
    reserve_size(concurrent_table& x_):x(x_)
    {
      size_=++x.size_ctrl.size;
    }

    ~reserve_size()
    {
      if(!commit_)--x.size_ctrl.size;
    }

    bool succeeded()const{return size_<=x.size_ctrl.ml;}

    void commit(){commit_=true;}

    concurrent_table &x;    /* owning table */
    std::size_t size_;      /* size value observed after our increment */
    bool commit_=false;     /* true once the insertion has completed */
  };
1466
  /* RAII slot reservation: marks slot pos of group pg as taken for hash on
   * construction, and resets the slot on destruction unless commit() was
   * called (i.e. unless the element was actually constructed there).
   */
  struct reserve_slot
  {
    reserve_slot(group_type* pg_,std::size_t pos_,std::size_t hash):
      pg{pg_},pos{pos_}
    {
      pg->set(pos,hash);
    }

    ~reserve_slot()
    {
      if(!commit_)pg->reset(pos);
    }

    void commit(){commit_=true;}

    group_type *pg;       /* group holding the reserved slot */
    std::size_t pos;      /* reserved slot index */
    bool commit_=false;   /* true once the element is in place */
  };
1486
  /* Insert-or-visit without rehashing. Returns:
   *    1: a new element was inserted,
   *    0: an equivalent element was found and visited with f,
   *   -1: table at max load; the caller must rehash and retry.
   * Race protection: the insert counter for pos0 is read before the
   * lookup and re-checked (and bumped) under the group lock right before
   * constructing the element; a mismatch means another thread may have
   * inserted an equivalent key in between, so the whole operation starts
   * over from the lookup.
   */
  template<typename GroupAccessMode,typename F,typename... Args>
  BOOST_FORCEINLINE int
  unprotected_norehash_emplace_or_visit(
    GroupAccessMode access_mode,F&& f,Args&&... args)
  {
    const auto &k=this->key_from(std::forward<Args>(args)...);
    auto hash=this->hash_for(k);
    auto pos0=this->position_for(hash);

    for(;;){
    startover:
      boost::uint32_t counter=insert_counter(pos0);
      if(unprotected_visit(
        access_mode,k,pos0,hash,std::forward<F>(f)))return 0;

      reserve_size rsize(*this);
      if(BOOST_LIKELY(rsize.succeeded())){
        for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
          auto pos=pb.get();
          auto pg=this->arrays.groups()+pos;
          auto lck=access(group_exclusive{},pos);
          auto mask=pg->match_available();
          if(BOOST_LIKELY(mask!=0)){
            auto n=unchecked_countr_zero(mask);
            reserve_slot rslot{pg,n,hash};
            if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){
              /* an interleaving insertion from pos0 may have added an
               * equivalent key: abandon the slot and start over */
              goto startover;
            }
            auto p=this->arrays.elements()+pos*N+n;
            this->construct_element(p,std::forward<Args>(args)...);
            rslot.commit();
            rsize.commit();
            BOOST_UNORDERED_ADD_STATS(this->cstats.insertion,(pb.length()));
            return 1;
          }
          /* group full: flag overflow for this hash and keep probing */
          pg->mark_overflow(hash);
        }
      }
      else return -1;
    }
  }
1529
1530 void rehash_if_full()
1531 {
1532 auto lck=exclusive_access();
1533 if(this->size_ctrl.size==this->size_ctrl.ml){
1534 this->unchecked_rehash_for_growth();
1535 }
1536 }
1537
1538 template<typename GroupAccessMode,typename F>
1539 auto for_all_elements(GroupAccessMode access_mode,F f)const
1540 ->decltype(f(nullptr),void())
1541 {
1542 for_all_elements(
1543 access_mode,[&](group_type*,unsigned int,element_type* p){f(p);});
1544 }
1545
  /* Overload for callables taking (group, slot, element): implemented on
   * top of for_all_elements_while with an always-continue wrapper.
   */
  template<typename GroupAccessMode,typename F>
  auto for_all_elements(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr,0,nullptr),void())
  {
    for_all_elements_while(
      access_mode,[&](group_type* pg,unsigned int n,element_type* p)
        {f(pg,n,p);return true;});
  }
1554
  /* Early-exit traversal for callables taking just element_type*: adapts f
   * and forwards to the (group, slot, element) overload.
   */
  template<typename GroupAccessMode,typename F>
  auto for_all_elements_while(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr),bool())
  {
    return for_all_elements_while(
      access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
  }
1562
  /* Sequential early-exit traversal: iterates every group, locking each one
   * (shared or exclusive per access_mode) while its occupied slots are
   * handed to f. Stops and returns false as soon as f returns false;
   * returns true after a full traversal (including on an empty table).
   */
  template<typename GroupAccessMode,typename F>
  auto for_all_elements_while(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr,0,nullptr),bool())
  {
    auto p=this->arrays.elements();
    if(p){
      for(auto pg=this->arrays.groups(),last=pg+this->arrays.groups_size_mask+1;
          pg!=last;++pg,p+=N){
        auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups()));
        auto mask=this->match_really_occupied(pg,last);
        while(mask){
          auto n=unchecked_countr_zero(mask);
          if(!f(pg,n,p+n))return false;
          mask&=mask-1; /* clear lowest bit: next occupied slot */
        }
      }
    }
    return true;
  }
1582
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Parallel counterpart of for_all_elements for callables taking just
   * element_type*: adapts f and forwards to the full parallel overload.
   */
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  auto for_all_elements(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
    ->decltype(f(nullptr),void())
  {
    for_all_elements(
      access_mode,std::forward<ExecutionPolicy>(policy),
      [&](group_type*,unsigned int,element_type* p){f(p);});
  }

  /* Parallel traversal: groups are distributed via std::for_each under the
   * given execution policy; each group is locked (per access_mode) while
   * its occupied slots are passed to f. Traversal order is unspecified.
   */
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  auto for_all_elements(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
    ->decltype(f(nullptr,0,nullptr),void())
  {
    if(!this->arrays.elements())return;
    auto first=this->arrays.groups(),
         last=first+this->arrays.groups_size_mask+1;
    std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
      [&,this](group_type& g){
        auto pos=static_cast<std::size_t>(&g-first);
        auto p=this->arrays.elements()+pos*N;
        auto lck=access(access_mode,pos);
        auto mask=this->match_really_occupied(&g,last);
        while(mask){
          auto n=unchecked_countr_zero(mask);
          f(&g,n,p+n);
          mask&=mask-1;
        }
      }
    );
  }

  /* Parallel early-exit traversal via std::all_of: returns true iff f
   * returned true for every element visited (other groups may still be
   * processed after a false under a parallel policy).
   */
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  bool for_all_elements_while(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  {
    if(!this->arrays.elements())return true;
    auto first=this->arrays.groups(),
         last=first+this->arrays.groups_size_mask+1;
    return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
      [&,this](group_type& g){
        auto pos=static_cast<std::size_t>(&g-first);
        auto p=this->arrays.elements()+pos*N;
        auto lck=access(access_mode,pos);
        auto mask=this->match_really_occupied(&g,last);
        while(mask){
          auto n=unchecked_countr_zero(mask);
          if(!f(p+n))return false;
          mask&=mask-1;
        }
        return true;
      }
    );
  }
#endif
1640
1641 friend class boost::serialization::access;
1642
  /* Boost.Serialization entry point: dispatches to save()/load() via the
   * split-member protocol.
   */
  template<typename Archive>
  void serialize(Archive& ar,unsigned int version)
  {
    core::split_member(ar,*this,version);
  }
1648
  /* Dispatches saving on whether value_type is the key itself (set-like,
   * true_type) or a key/mapped pair (map-like, false_type).
   */
  template<typename Archive>
  void save(Archive& ar,unsigned int version)const
  {
    save(
      ar,version,
      std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
  }
1656
  /* Set-like save: writes the element count, the value serialization
   * version, and then each element. Exclusive access is held for the whole
   * operation so the archive sees a consistent snapshot.
   */
  template<typename Archive>
  void save(Archive& ar,unsigned int,std::true_type /* set */)const
  {
    auto lck=exclusive_access();
    const std::size_t s=super::size();
    const serialization_version<value_type> value_version;

    ar<<core::make_nvp("count",s);
    ar<<core::make_nvp("value_version",value_version);

    super::for_all_elements([&,this](element_type* p){
      auto& x=type_policy::value_from(*p);
      core::save_construct_data_adl(ar,std::addressof(x),value_version);
      ar<<serialization::make_nvp("item",x);
    });
  }
1673
  /* Map-like save: writes the element count plus separate serialization
   * versions for key and mapped types, then each element. Exclusive access
   * is held throughout for a consistent snapshot.
   */
  template<typename Archive>
  void save(Archive& ar,unsigned int,std::false_type /* map */)const
  {
    using raw_key_type=typename std::remove_const<key_type>::type;
    using raw_mapped_type=typename std::remove_const<
      typename TypePolicy::mapped_type>::type;

    auto lck=exclusive_access();
    const std::size_t s=super::size();
    const serialization_version<raw_key_type> key_version;
    const serialization_version<raw_mapped_type> mapped_version;

    ar<<core::make_nvp("count",s);
    ar<<core::make_nvp("key_version",key_version);
    ar<<core::make_nvp("mapped_version",mapped_version);

    super::for_all_elements([&,this](element_type* p){

      /* The key and mapped parts are saved as two separate objects
       * (mirrored by the two-part load below); presumably because
       * value_type is a pair with a const key and cannot be loaded into
       * directly — confirm against the load() counterpart. */

      auto& x=type_policy::value_from(*p);
      core::save_construct_data_adl(
        ar,std::addressof(x.first),key_version);
      ar<<serialization::make_nvp("key",x.first);
      core::save_construct_data_adl(
        ar,std::addressof(x.second),mapped_version);
      ar<<serialization::make_nvp("mapped",x.second);
    });
  }
1706
  /* Dispatches loading on whether value_type is the key itself (set-like,
   * true_type) or a key/mapped pair (map-like, false_type).
   */
  template<typename Archive>
  void load(Archive& ar,unsigned int version)
  {
    load(
      ar,version,
      std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
  }
1714
  /* Set-like load: clears the table, reserves the archived count and
   * re-inserts each element. A duplicate key in the archive raises
   * bad_archive_exception. reset_object_address informs the archive of the
   * element's final in-table address for pointer tracking.
   */
  template<typename Archive>
  void load(Archive& ar,unsigned int,std::true_type /* set */)
  {
    auto lck=exclusive_access();
    std::size_t s;
    serialization_version<value_type> value_version;

    ar>>core::make_nvp("count",s);
    ar>>core::make_nvp("value_version",value_version);

    super::clear();
    super::reserve(s);

    for(std::size_t n=0;n<s;++n){
      archive_constructed<value_type> value("item",ar,value_version);
      auto& x=value.get();
      auto hash=this->hash_for(x);
      auto pos0=this->position_for(hash);

      if(this->find(x,pos0,hash))throw_exception(bad_archive_exception());
      auto loc=this->unchecked_emplace_at(pos0,hash,std::move(x));
      ar.reset_object_address(std::addressof(*loc.p),std::addressof(x));
    }
  }
1739
  /* Map-like load: reads key and mapped objects separately (matching the
   * two-part save above) and emplaces them. A duplicate key raises
   * bad_archive_exception; reset_object_address reports the final in-table
   * addresses of both parts for archive pointer tracking.
   */
  template<typename Archive>
  void load(Archive& ar,unsigned int,std::false_type /* map */)
  {
    using raw_key_type=typename std::remove_const<key_type>::type;
    using raw_mapped_type=typename std::remove_const<
      typename TypePolicy::mapped_type>::type;

    auto lck=exclusive_access();
    std::size_t s;
    serialization_version<raw_key_type> key_version;
    serialization_version<raw_mapped_type> mapped_version;

    ar>>core::make_nvp("count",s);
    ar>>core::make_nvp("key_version",key_version);
    ar>>core::make_nvp("mapped_version",mapped_version);

    super::clear();
    super::reserve(s);

    for(std::size_t n=0;n<s;++n){
      archive_constructed<raw_key_type> key("key",ar,key_version);
      archive_constructed<raw_mapped_type> mapped("mapped",ar,mapped_version);
      auto& k=key.get();
      auto& m=mapped.get();
      auto hash=this->hash_for(k);
      auto pos0=this->position_for(hash);

      if(this->find(k,pos0,hash))throw_exception(bad_archive_exception());
      auto loc=this->unchecked_emplace_at(pos0,hash,std::move(k),std::move(m));
      ar.reset_object_address(std::addressof(loc.p->first),std::addressof(k));
      ar.reset_object_address(std::addressof(loc.p->second),std::addressof(m));
    }
  }
1773
  /* monotonically increasing counter shared by all instances of this
   * specialization; presumably a source of per-thread indices into the
   * multimutex below — confirm in the access()/lock implementation */
  static std::atomic<std::size_t> thread_counter;
  mutable multimutex_type mutexes; /* striped lock state for table access */
1776 };
1777
/* out-of-class definition (zero-initialized) of the static thread counter */
template<typename T,typename H,typename P,typename A>
std::atomic<std::size_t> concurrent_table<T,H,P,A>::thread_counter={};
1780
1781 #if defined(BOOST_MSVC)
1782 #pragma warning(pop)
1783 #endif
1784
1785 #include <boost/unordered/detail/foa/restore_wshadow.hpp>
1786
1787 }
1788 }
1789 }
1790 }
1791
1792 #endif