File indexing completed on 2025-12-03 09:57:25
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
0013 #define BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
0014
0015 #include <atomic>
0016 #include <boost/assert.hpp>
0017 #include <boost/config.hpp>
0018 #include <boost/core/ignore_unused.hpp>
0019 #include <boost/core/no_exceptions_support.hpp>
0020 #include <boost/core/serialization.hpp>
0021 #include <boost/cstdint.hpp>
0022 #include <boost/mp11/tuple.hpp>
0023 #include <boost/throw_exception.hpp>
0024 #include <boost/unordered/detail/archive_constructed.hpp>
0025 #include <boost/unordered/detail/bad_archive_exception.hpp>
0026 #include <boost/unordered/detail/foa/core.hpp>
0027 #include <boost/unordered/detail/foa/reentrancy_check.hpp>
0028 #include <boost/unordered/detail/foa/rw_spinlock.hpp>
0029 #include <boost/unordered/detail/foa/tuple_rotate_right.hpp>
0030 #include <boost/unordered/detail/serialization_version.hpp>
0031 #include <boost/unordered/detail/static_assert.hpp>
0032 #include <boost/unordered/detail/type_traits.hpp>
0033 #include <cstddef>
0034 #include <functional>
0035 #include <initializer_list>
0036 #include <iterator>
0037 #include <memory>
0038 #include <new>
0039 #include <type_traits>
0040 #include <tuple>
0041 #include <utility>
0042
0043 #if !defined(BOOST_UNORDERED_DISABLE_PARALLEL_ALGORITHMS)
0044 #if defined(BOOST_UNORDERED_ENABLE_PARALLEL_ALGORITHMS)|| \
0045 !defined(BOOST_NO_CXX17_HDR_EXECUTION)
0046 #define BOOST_UNORDERED_PARALLEL_ALGORITHMS
0047 #endif
0048 #endif
0049
0050 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
0051 #include <algorithm>
0052 #include <execution>
0053 #endif
0054
0055 namespace boost{
0056 namespace unordered{
0057 namespace detail{
0058
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)

/* Detects whether ExecutionPolicy is a standard execution policy
 * (std::execution::seq, par, par_unseq, ...) after stripping references
 * and cv-qualifiers, so that overloads taking a policy can be SFINAE'd.
 */

template<typename ExecutionPolicy>
using is_execution_policy=std::is_execution_policy<
  typename std::remove_cv<
    typename std::remove_reference<ExecutionPolicy>::type
  >::type
>;

#else

/* Parallel algorithms unavailable: nothing qualifies as an execution policy. */

template<typename ExecutionPolicy>
using is_execution_policy=std::false_type;

#endif
0074
0075 namespace foa{
0076
/* Assumed cache line size, used to pad/align per-group and per-mutex data so
 * that adjacent objects do not share a cache line (false sharing). 64 is the
 * common value on x86/ARM; not queried from the target platform.
 */
static constexpr std::size_t cacheline_size=64;
0078
0079 template<typename T,std::size_t N>
0080 class cache_aligned_array
0081 {
0082 public:
0083 cache_aligned_array(){for(std::size_t n=0;n<N;)::new (data(n++)) T();}
0084 ~cache_aligned_array(){for(auto n=N;n>0;)data(n--)->~T();}
0085 cache_aligned_array(const cache_aligned_array&)=delete;
0086 cache_aligned_array& operator=(const cache_aligned_array&)=delete;
0087
0088 T& operator[](std::size_t pos)noexcept{return *data(pos);}
0089
0090 private:
0091 static constexpr std::size_t element_offset=
0092 (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size;
0093
0094 BOOST_UNORDERED_STATIC_ASSERT(alignof(T)<=cacheline_size);
0095
0096 T* data(std::size_t pos)noexcept
0097 {
0098 return reinterpret_cast<T*>(
0099 (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/
0100 cacheline_size*cacheline_size
0101 +pos*element_offset);
0102 }
0103
0104 unsigned char buf[element_offset*N+cacheline_size-1];
0105 };
0106
0107 template<typename Mutex,std::size_t N>
0108 class multimutex
0109 {
0110 public:
0111 constexpr std::size_t size()const noexcept{return N;}
0112
0113 Mutex& operator[](std::size_t pos)noexcept
0114 {
0115 BOOST_ASSERT(pos<N);
0116 return mutexes[pos];
0117 }
0118
0119 void lock()noexcept{for(std::size_t n=0;n<N;)mutexes[n++].lock();}
0120 void unlock()noexcept{for(auto n=N;n>0;)mutexes[--n].unlock();}
0121
0122 private:
0123 cache_aligned_array<Mutex,N> mutexes;
0124 };
0125
0126
0127
0128 template<typename Mutex>
0129 class shared_lock
0130 {
0131 public:
0132 shared_lock(Mutex& m_)noexcept:m(m_){m.lock_shared();}
0133 ~shared_lock()noexcept{if(owns)m.unlock_shared();}
0134
0135
0136 shared_lock(const shared_lock&);
0137
0138 void lock(){BOOST_ASSERT(!owns);m.lock_shared();owns=true;}
0139 void unlock(){BOOST_ASSERT(owns);m.unlock_shared();owns=false;}
0140
0141 private:
0142 Mutex &m;
0143 bool owns=true;
0144 };
0145
0146
0147
0148
0149
/* Non-movable RAII guard taking exclusive ownership of a mutex for its
 * entire lifetime (unlike shared_lock, ownership cannot be released early).
 */

template<typename Mutex>
class lock_guard
{
public:
  lock_guard(Mutex& m_)noexcept:mtx(m_){mtx.lock();}
  ~lock_guard()noexcept{mtx.unlock();}

  /* declared but never defined: presumably kept so that pre-C++17 MSVC
   * can elide the copy on return (RVO) — TODO confirm
   */
  lock_guard(const lock_guard&);

private:
  Mutex &mtx;
};
0163
0164
0165
/* RAII guard over TWO mutexes that always acquires them in ascending
 * address order (std::less on pointers gives a total order), so that two
 * threads locking the same pair in opposite argument order cannot
 * deadlock. Passing the same mutex twice locks it only once.
 */

template<typename Mutex>
class scoped_bilock
{
public:
  scoped_bilock(Mutex& m1,Mutex& m2)noexcept
  {
    const bool first_is_lower=std::less<Mutex*>{}(&m1,&m2);

    pm1=first_is_lower?&m1:&m2;
    pm1->lock();
    pm2=(&m1==&m2)?nullptr:(first_is_lower?&m2:&m1);
    if(pm2)pm2->lock();
  }

  /* declared but never defined: presumably kept so that pre-C++17 MSVC
   * can elide the copy on return (RVO) — TODO confirm
   */
  scoped_bilock(const scoped_bilock&);

  ~scoped_bilock()noexcept
  {
    /* release in reverse acquisition order */
    if(pm2)pm2->unlock();
    pm1->unlock();
  }

private:
  Mutex *pm1,*pm2;
};
0197
0198
0199
/* Drop-in replacement for a plain integral (used for group metadata via
 * group15<atomic_integral>) where every access is a relaxed atomic
 * load/store/RMW: values become individually race-free without imposing
 * any ordering on surrounding operations.
 */

template<typename Integral>
struct atomic_integral
{
  operator Integral()const
  {
    return n.load(std::memory_order_relaxed);
  }

  void operator=(Integral m)
  {
    n.store(m,std::memory_order_relaxed);
  }

  void operator|=(Integral m)
  {
    n.fetch_or(m,std::memory_order_relaxed);
  }

  void operator&=(Integral m)
  {
    n.fetch_and(m,std::memory_order_relaxed);
  }

  /* copy assignment: relaxed load from rhs, relaxed store to *this
   * (std::atomic itself is not copyable)
   */
  atomic_integral& operator=(atomic_integral const& rhs)
  {
    n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
    return *this;
  }

  std::atomic<Integral> n;
};
0215
0216
0217
0218
0219
0220
/* Per-group synchronization state: an rw spinlock guarding the group's slots
 * plus an atomic insertion counter. The counter is exposed raw via
 * insert_counter(); presumably it is bumped on insertion so that lookups can
 * detect concurrent modification optimistically — confirm against callers
 * (see concurrent_table::insert_counter).
 */
struct group_access
{
  using mutex_type=rw_spinlock;
  using shared_lock_guard=shared_lock<mutex_type>;
  using exclusive_lock_guard=lock_guard<mutex_type>;
  using insert_counter_type=std::atomic<boost::uint32_t>;

  /* reader lock on the group */
  shared_lock_guard shared_access(){return shared_lock_guard{m};}
  /* writer lock on the group */
  exclusive_lock_guard exclusive_access(){return exclusive_lock_guard{m};}
  insert_counter_type& insert_counter(){return cnt;}

private:
  mutex_type m;
  insert_counter_type cnt{0};
};
0236
/* Statically allocated group_access array shared by all tables that have no
 * real element storage (see the std::true_type overload of
 * concurrent_table_arrays::set_group_access): locking and counter operations
 * then act on these dummies instead of requiring an allocation for an
 * empty table.
 */
template<std::size_t Size>
group_access* dummy_group_accesses()
{
  static group_access accesses[Size];

  return accesses;
}
0250
0251
0252
/* table_arrays extended with a parallel array of group_access objects, one
 * per group, allocated through the table allocator rebound to group_access.
 */
template<typename Value,typename Group,typename SizePolicy,typename Allocator>
struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy,Allocator>
{
  using group_access_allocator_type=
    typename boost::allocator_rebind<Allocator,group_access>::type;
  using group_access_pointer=
    typename boost::allocator_pointer<group_access_allocator_type>::type;

  using super=table_arrays<Value,Group,SizePolicy,Allocator>;
  using allocator_type=typename super::allocator_type;

  concurrent_table_arrays(const super& arrays,group_access_pointer pga):
    super{arrays},group_accesses_{pga}{}

  group_access* group_accesses()const noexcept{
    return boost::to_address(group_accesses_);
  }

  /* Allocates the base arrays, then the group_access array; if the latter
   * throws, the base arrays are released before rethrowing (strong
   * exception safety for this function).
   */
  static concurrent_table_arrays new_(allocator_type al,std::size_t n)
  {
    super x{super::new_(al,n)};
    BOOST_TRY{
      return new_group_access(group_access_allocator_type(al),x);
    }
    BOOST_CATCH(...){
      super::delete_(al,x);
      BOOST_RETHROW
    }
    BOOST_CATCH_END
  }

  /* Tag-dispatches on whether group_access_pointer is a raw pointer: only
   * then can an empty table share the static dummy accesses.
   */
  static void set_group_access(
    group_access_allocator_type al,concurrent_table_arrays& arrays)
  {
    set_group_access(
      al,arrays,std::is_same<group_access*,group_access_pointer>{});
  }

  /* General (fancy-pointer, or known-nonempty) case: allocate and
   * value-initialize one group_access per group.
   */
  static void set_group_access(
    group_access_allocator_type al,
    concurrent_table_arrays& arrays,
    std::false_type /* fancy pointers */)
  {
    arrays.group_accesses_=
      boost::allocator_allocate(al,arrays.groups_size_mask+1);

    for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
      ::new (arrays.group_accesses()+i) group_access();
    }
  }

  /* Raw-pointer case: a table without element storage points at the shared
   * static dummy array instead of allocating.
   */
  static void set_group_access(
    group_access_allocator_type al,
    concurrent_table_arrays& arrays,
    std::true_type /* group_access* */)
  {
    if(!arrays.elements()){
      arrays.group_accesses_=
        dummy_group_accesses<SizePolicy::min_size()>();
    } else {
      set_group_access(al,arrays,std::false_type{});
    }
  }

  static concurrent_table_arrays new_group_access(
    group_access_allocator_type al,const super& x)
  {
    concurrent_table_arrays arrays{x,nullptr};
    set_group_access(al,arrays);
    return arrays;
  }

  static void delete_(allocator_type al,concurrent_table_arrays& arrays)noexcept
  {
    delete_group_access(group_access_allocator_type(al),arrays);
    super::delete_(al,arrays);
  }

  /* No deallocation when elements() is null: group_accesses_ then refers to
   * the static dummy array (or was never allocated).
   */
  static void delete_group_access(
    group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
  {
    if(arrays.elements()){
      boost::allocator_deallocate(
        al,arrays.group_accesses_,arrays.groups_size_mask+1);
    }
  }

  group_access_pointer group_accesses_;
};
0342
/* Atomic counterpart of the table_core size-control block: ml (max load
 * threshold) and size, each preceded by padding sized so that the two
 * atomics land on different cache lines, limiting false sharing under
 * concurrent updates.
 */
struct atomic_size_control
{
  static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>);
  BOOST_UNORDERED_STATIC_ASSERT(atomic_size_t_size<cacheline_size);

  atomic_size_control(std::size_t ml_,std::size_t size_):
    pad0_{},ml{ml_},pad1_{},size{size_}{}
  /* copy via plain (seq_cst) loads; not an atomic snapshot of both fields */
  atomic_size_control(const atomic_size_control& x):
    pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}

  /* pad0_/pad1_ push ml and size onto separate cache lines */

  unsigned char pad0_[cacheline_size-atomic_size_t_size];
  std::atomic<std::size_t> ml;
  unsigned char pad1_[cacheline_size-atomic_size_t_size];
  std::atomic<std::size_t> size;
};
0360
0361
0362
/* Exchanges the values of two atomics with individual loads/stores. The
 * swap is NOT atomic as a whole; callers are expected to hold exclusive
 * access to both owning containers (cf. concurrent_table::swap) — confirm
 * at call sites.
 */
inline void
swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y)
{
  const std::size_t saved=x.load();
  x.store(y.load());
  y.store(saved);
}
0370
/* Member-wise swap of two size-control blocks; like swap_atomic_size_t,
 * not atomic as a whole.
 */
inline void swap(atomic_size_control& x,atomic_size_control& y)
{
  swap_atomic_size_t(x.ml,y.ml);
  swap_atomic_size_t(x.size,y.size);
}
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
/* Forward declaration of the nonconcurrent foa::table, which
 * concurrent_table can interchange storage with (move construction in
 * either direction).
 */
template<typename,typename,typename,typename>
class table;

/* table_core configured for concurrency: atomic group metadata
 * (group15<atomic_integral>), arrays augmented with per-group access
 * objects, and an atomic size/max-load control block.
 */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using concurrent_table_core_impl=table_core<
  TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
  atomic_size_control,Hash,Pred,Allocator>;
0439
0440 #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
0441
0442 #if defined(BOOST_MSVC)
0443 #pragma warning(push)
0444 #pragma warning(disable:4714)
0445 #endif
0446
0447 template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
0448 class concurrent_table:
0449 concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>
0450 {
0451 using super=concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>;
0452 using type_policy=typename super::type_policy;
0453 using group_type=typename super::group_type;
0454 using super::N;
0455 using prober=typename super::prober;
0456 using arrays_type=typename super::arrays_type;
0457 using size_ctrl_type=typename super::size_ctrl_type;
0458 using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
0459 friend compatible_nonconcurrent_table;
0460
public:
  /* public nested types re-exported from table_core */
  using key_type=typename super::key_type;
  using init_type=typename super::init_type;
  using value_type=typename super::value_type;
  using element_type=typename super::element_type;
  using hasher=typename super::hasher;
  using key_equal=typename super::key_equal;
  using allocator_type=typename super::allocator_type;
  using size_type=typename super::size_type;
  /* batch size used by the bulk visit(first,last,f) overloads */
  static constexpr std::size_t bulk_visit_size=16;

#if defined(BOOST_UNORDERED_ENABLE_STATS)
  using stats=typename super::stats;
#endif
0475
private:
  /* Enables an overload only when Value is exactly value_type AND
   * value_type differs from init_type (the map case), so value_type
   * overloads don't clash with the init_type ones.
   */
  template<typename Value,typename T>
  using enable_if_is_value_type=typename std::enable_if<
    !std::is_same<init_type,value_type>::value&&
    std::is_same<Value,value_type>::value,
    T
  >::type;
0484 public:
  concurrent_table(
    std::size_t n=default_bucket_count,const Hash& h_=Hash(),
    const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
    super{n,h_,pred_,al_}
  {}

  /* Copy/move construction hold an exclusive lock on the source for the
   * whole operation: these delegate to private counterparts that take an
   * exclusive_lock_guard on x while super is copied/moved from.
   */
  concurrent_table(const concurrent_table& x):
    concurrent_table(x,x.exclusive_access()){}
  concurrent_table(concurrent_table&& x):
    concurrent_table(std::move(x),x.exclusive_access()){}
  concurrent_table(const concurrent_table& x,const Allocator& al_):
    concurrent_table(x,al_,x.exclusive_access()){}
  concurrent_table(concurrent_table&& x,const Allocator& al_):
    concurrent_table(std::move(x),al_,x.exclusive_access()){}

  /* Takes over the storage of a nonconcurrent table: moves hash, pred and
   * allocator, adopts x's arrays (reinterpreting the group metadata as the
   * atomic group_type and attaching per-group access objects), copies the
   * size control, and finally resets x to the empty arrays provided by ah.
   */
  template<typename ArraysType>
  concurrent_table(
    compatible_nonconcurrent_table&& x,
    arrays_holder<ArraysType,Allocator>&& ah):
    super{
      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
      [&x]{return arrays_type::new_group_access(
        x.al(),typename arrays_type::super{
          x.arrays.groups_size_index,x.arrays.groups_size_mask,
          to_pointer<typename arrays_type::group_type_pointer>(
            reinterpret_cast<group_type*>(x.arrays.groups())),
          x.arrays.elements_});},
      size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
  {
    x.arrays=ah.release();
    x.size_ctrl.ml=x.initial_max_load();
    x.size_ctrl.size=0;
    BOOST_UNORDERED_SWAP_STATS(this->cstats,x.cstats);
  }

  concurrent_table(compatible_nonconcurrent_table&& x):
    concurrent_table(std::move(x),x.make_empty_arrays())
  {}

  ~concurrent_table()=default;
0525
  /* Assignment locks BOTH tables via an exclusive bilock (address-ordered
   * through scoped_bilock, so concurrent x=y / y=x cannot deadlock).
   */
  concurrent_table& operator=(const concurrent_table& x)
  {
    auto lck=exclusive_access(*this,x);
    super::operator=(x);
    return *this;
  }

  concurrent_table& operator=(concurrent_table&& x)noexcept(
    noexcept(std::declval<super&>() = std::declval<super&&>()))
  {
    auto lck=exclusive_access(*this,x);
    super::operator=(std::move(x));
    return *this;
  }

  /* Replaces the contents with il under the table-wide exclusive lock;
   * unprotected_emplace is safe here because that lock is already held.
   */
  concurrent_table& operator=(std::initializer_list<value_type> il) {
    auto lck=exclusive_access();
    super::clear();
    super::noshrink_reserve(il.size());
    for (auto const& v : il) {
      this->unprotected_emplace(v);
    }
    return *this;
  }

  allocator_type get_allocator()const noexcept
  {
    auto lck=shared_access();
    return super::get_allocator();
  }
0556
  /* Single-key visitation: forwards to visit_impl tagged with the required
   * group-level locking — group_exclusive for the mutable overload (f may
   * modify the element), group_shared for the const/cvisit ones. Returns
   * the count reported by visit_impl (number of elements visited).
   */
  template<typename Key,typename F>
  BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)
  {
    return visit_impl(group_exclusive{},x,std::forward<F>(f));
  }

  template<typename Key,typename F>
  BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)const
  {
    return visit_impl(group_shared{},x,std::forward<F>(f));
  }

  template<typename Key,typename F>
  BOOST_FORCEINLINE std::size_t cvisit(const Key& x,F&& f)const
  {
    return visit(x,std::forward<F>(f));
  }

  /* Bulk visitation over the keys in [first,last) (processed internally in
   * batches of bulk_visit_size); same locking-tag scheme as above.
   */
  template<typename FwdIterator,typename F>
  BOOST_FORCEINLINE
  std::size_t visit(FwdIterator first,FwdIterator last,F&& f)
  {
    return bulk_visit_impl(group_exclusive{},first,last,std::forward<F>(f));
  }

  template<typename FwdIterator,typename F>
  BOOST_FORCEINLINE
  std::size_t visit(FwdIterator first,FwdIterator last,F&& f)const
  {
    return bulk_visit_impl(group_shared{},first,last,std::forward<F>(f));
  }

  template<typename FwdIterator,typename F>
  BOOST_FORCEINLINE
  std::size_t cvisit(FwdIterator first,FwdIterator last,F&& f)const
  {
    return visit(first,last,std::forward<F>(f));
  }

  /* Whole-table visitation. */
  template<typename F> std::size_t visit_all(F&& f)
  {
    return visit_all_impl(group_exclusive{},std::forward<F>(f));
  }

  template<typename F> std::size_t visit_all(F&& f)const
  {
    return visit_all_impl(group_shared{},std::forward<F>(f));
  }

  template<typename F> std::size_t cvisit_all(F&& f)const
  {
    return visit_all(std::forward<F>(f));
  }

#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Parallel whole-table visitation; note no visit count is returned. */
  template<typename ExecutionPolicy,typename F>
  void visit_all(ExecutionPolicy&& policy,F&& f)
  {
    visit_all_impl(
      group_exclusive{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  void visit_all(ExecutionPolicy&& policy,F&& f)const
  {
    visit_all_impl(
      group_shared{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  void cvisit_all(ExecutionPolicy&& policy,F&& f)const
  {
    visit_all(std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }
#endif

  /* Early-exit visitation: forwards to visit_while_impl; by convention f
   * returning false stops the traversal — confirm exact return semantics
   * in visit_while_impl.
   */
  template<typename F> bool visit_while(F&& f)
  {
    return visit_while_impl(group_exclusive{},std::forward<F>(f));
  }

  template<typename F> bool visit_while(F&& f)const
  {
    return visit_while_impl(group_shared{},std::forward<F>(f));
  }

  template<typename F> bool cvisit_while(F&& f)const
  {
    return visit_while(std::forward<F>(f));
  }

#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  template<typename ExecutionPolicy,typename F>
  bool visit_while(ExecutionPolicy&& policy,F&& f)
  {
    return visit_while_impl(
      group_exclusive{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  bool visit_while(ExecutionPolicy&& policy,F&& f)const
  {
    return visit_while_impl(
      group_shared{},
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }

  template<typename ExecutionPolicy,typename F>
  bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
  {
    return visit_while(
      std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  }
#endif

  bool empty()const noexcept{return size()==0;}

  /* size under the thread-local shared table lock */
  std::size_t size()const noexcept
  {
    auto lck=shared_access();
    return unprotected_size();
  }

  using super::max_size;
0684
  /* Generic emplace: constructs the element from args, then inserts.
   * Returns true iff an insertion took place.
   */
  template<typename... Args>
  BOOST_FORCEINLINE bool emplace(Args&&... args)
  {
    return construct_and_emplace(std::forward<Args>(args)...);
  }

  /* Optimization: a single argument similar to value_type/init_type skips
   * the intermediate construction and is forwarded directly.
   */
  template<typename Value>
  BOOST_FORCEINLINE auto emplace(Value&& x)->typename std::enable_if<
    detail::is_similar_to_any<Value,value_type,init_type>::value,bool>::type
  {
    return emplace_impl(std::forward<Value>(x));
  }

  /* (k,v) pair emplace for maps: the key is constructed (or forwarded)
   * through the allocator-aware helper, then try_emplace semantics apply.
   */
  template <typename K, typename V>
  BOOST_FORCEINLINE auto emplace(K&& k, V&& v) ->
    typename std::enable_if<is_emplace_kv_able<concurrent_table, K>::value,
      bool>::type
  {
    alloc_cted_or_fwded_key_type<type_policy, Allocator, K&&> x(
      this->al(), std::forward<K>(k));
    return emplace_impl(
      try_emplace_args_t{}, x.move_or_fwd(), std::forward<V>(v));
  }

  BOOST_FORCEINLINE bool
  insert(const init_type& x){return emplace_impl(x);}

  BOOST_FORCEINLINE bool
  insert(init_type&& x){return emplace_impl(std::move(x));}

  /* value_type overloads are templated so they drop out of overload
   * resolution when value_type==init_type (set case).
   */
  template<typename=void>
  BOOST_FORCEINLINE bool
  insert(const value_type& x){return emplace_impl(x);}

  template<typename=void>
  BOOST_FORCEINLINE bool
  insert(value_type&& x){return emplace_impl(std::move(x));}

  template<typename T=element_type>
  BOOST_FORCEINLINE
  typename std::enable_if<
    !std::is_same<T,value_type>::value,
    bool
  >::type
  insert(element_type&& x){return emplace_impl(std::move(x));}

  /* try_emplace: the mapped value is only constructed if the key is absent. */
  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace(Key&& x,Args&&... args)
  {
    return emplace_impl(
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  /* *_or_visit / *_and_visit variants: the trailing functor of args is
   * invoked on the (existing resp. inserted) element; the group locking tag
   * (group_exclusive vs group_shared) selects mutable or const access for
   * that visitation.
   */
  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace_or_visit(Key&& x,Args&&... args)
  {
    return emplace_or_visit_flast(
      group_exclusive{},
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace_or_cvisit(Key&& x,Args&&... args)
  {
    return emplace_or_visit_flast(
      group_shared{},
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace_and_visit(Key&& x,Args&&... args)
  {
    return emplace_and_visit_flast(
      group_exclusive{},
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE bool try_emplace_and_cvisit(Key&& x,Args&&... args)
  {
    return emplace_and_visit_flast(
      group_shared{},
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_or_visit(Args&&... args)
  {
    return construct_and_emplace_or_visit_flast(
      group_exclusive{},std::forward<Args>(args)...);
  }

  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_or_cvisit(Args&&... args)
  {
    return construct_and_emplace_or_visit_flast(
      group_shared{},std::forward<Args>(args)...);
  }

  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_and_visit(Args&&... args)
  {
    return construct_and_emplace_and_visit_flast(
      group_exclusive{},std::forward<Args>(args)...);
  }

  template<typename... Args>
  BOOST_FORCEINLINE bool emplace_and_cvisit(Args&&... args)
  {
    return construct_and_emplace_and_visit_flast(
      group_shared{},std::forward<Args>(args)...);
  }
0801
  /* insert_or_visit == insert_and_visit with a no-op "on inserted" functor:
   * f is only invoked when the element already existed.
   */
  template<typename Value,typename F>
  BOOST_FORCEINLINE bool insert_or_visit(Value&& x,F&& f)
  {
    return insert_and_visit(
      std::forward<Value>(x),[](const value_type&){},std::forward<F>(f));
  }

  template<typename Value,typename F>
  BOOST_FORCEINLINE bool insert_or_cvisit(Value&& x,F&& f)
  {
    return insert_and_cvisit(
      std::forward<Value>(x),[](const value_type&){},std::forward<F>(f));
  }

  /* insert_and_[c]visit(x,f1,f2): f1 is applied on insertion, f2 on an
   * already-present element; the group tag controls whether f2 gets
   * mutable (exclusive) or const (shared) access.
   */
  template<typename F1,typename F2>
  BOOST_FORCEINLINE bool insert_and_visit(const init_type& x,F1&& f1,F2&& f2)
  {
    return emplace_and_visit_impl(
      group_exclusive{},std::forward<F1>(f1),std::forward<F2>(f2),x);
  }

  template<typename F1,typename F2>
  BOOST_FORCEINLINE bool insert_and_cvisit(const init_type& x,F1&& f1,F2&& f2)
  {
    return emplace_and_visit_impl(
      group_shared{},std::forward<F1>(f1),std::forward<F2>(f2),x);
  }

  template<typename F1,typename F2>
  BOOST_FORCEINLINE bool insert_and_visit(init_type&& x,F1&& f1,F2&& f2)
  {
    return emplace_and_visit_impl(
      group_exclusive{},std::forward<F1>(f1),std::forward<F2>(f2),
      std::move(x));
  }

  template<typename F1,typename F2>
  BOOST_FORCEINLINE bool insert_and_cvisit(init_type&& x,F1&& f1,F2&& f2)
  {
    return emplace_and_visit_impl(
      group_shared{},std::forward<F1>(f1),std::forward<F2>(f2),std::move(x));
  }

  /* value_type overloads, enabled only when value_type!=init_type (maps) */

  template<typename Value,typename F1,typename F2>
  BOOST_FORCEINLINE auto insert_and_visit(const Value& x,F1&& f1,F2&& f2)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_and_visit_impl(
      group_exclusive{},std::forward<F1>(f1),std::forward<F2>(f2),x);
  }

  template<typename Value,typename F1,typename F2>
  BOOST_FORCEINLINE auto insert_and_cvisit(const Value& x,F1&& f1,F2&& f2)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_and_visit_impl(
      group_shared{},std::forward<F1>(f1),std::forward<F2>(f2),x);
  }

  template<typename Value,typename F1,typename F2>
  BOOST_FORCEINLINE auto insert_and_visit(Value&& x,F1&& f1,F2&& f2)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_and_visit_impl(
      group_exclusive{},std::forward<F1>(f1),std::forward<F2>(f2),
      std::move(x));
  }

  template<typename Value,typename F1,typename F2>
  BOOST_FORCEINLINE auto insert_and_cvisit(Value&& x,F1&& f1,F2&& f2)
    ->enable_if_is_value_type<Value,bool>
  {
    return emplace_and_visit_impl(
      group_shared{},std::forward<F1>(f1),std::forward<F2>(f2),std::move(x));
  }

  /* element_type overloads, enabled only when element_type!=value_type */

  template<typename F1,typename F2,typename T=element_type>
  BOOST_FORCEINLINE
  typename std::enable_if<
    !std::is_same<T,value_type>::value,
    bool
  >::type
  insert_and_visit(element_type&& x,F1&& f1,F2&& f2)
  {
    return emplace_and_visit_impl(
      group_exclusive{},std::forward<F1>(f1),std::forward<F2>(f2),
      std::move(x));
  }

  template<typename F1,typename F2,typename T=element_type>
  BOOST_FORCEINLINE
  typename std::enable_if<
    !std::is_same<T,value_type>::value,
    bool
  >::type
  insert_and_cvisit(element_type&& x,F1&& f1,F2&& f2)
  {
    return emplace_and_visit_impl(
      group_shared{},std::forward<F1>(f1),std::forward<F2>(f2),std::move(x));
  }
0904
  /* erase(key) == erase_if(key, always-true); returns the number erased. */
  template<typename Key>
  BOOST_FORCEINLINE std::size_t erase(const Key& x)
  {
    return erase_if(x,[](const value_type&){return true;});
  }

  /* Per-key conditional erase under the SHARED table lock plus an
   * EXCLUSIVE lock on the key's group (the standard fine-grained write
   * pattern in this class). Disabled when Key is an execution policy so
   * the parallel overload below wins.
   */
  template<typename Key,typename F>
  BOOST_FORCEINLINE auto erase_if(const Key& x,F&& f)->typename std::enable_if<
    !is_execution_policy<Key>::value,std::size_t>::type
  {
    auto lck=shared_access();
    auto hash=this->hash_for(x);
    std::size_t res=0;
    unprotected_internal_visit(
      group_exclusive{},x,this->position_for(hash),hash,
      [&,this](group_type* pg,unsigned int n,element_type* p)
      {
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          super::erase(pg,n,p);
          res=1;
        }
      });
    return res;
  }

  /* Whole-table conditional erase; returns the number of erased elements. */
  template<typename F>
  std::size_t erase_if(F&& f)
  {
    auto lck=shared_access();
    std::size_t res=0;
    for_all_elements(
      group_exclusive{},
      [&,this](group_type* pg,unsigned int n,element_type* p){
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          super::erase(pg,n,p);
          ++res;
        }
      });
    return res;
  }

#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  /* Parallel variant; no count is returned. */
  template<typename ExecutionPolicy,typename F>
  auto erase_if(ExecutionPolicy&& policy,F&& f)->typename std::enable_if<
    is_execution_policy<ExecutionPolicy>::value,void>::type
  {
    auto lck=shared_access();
    for_all_elements(
      group_exclusive{},std::forward<ExecutionPolicy>(policy),
      [&,this](group_type* pg,unsigned int n,element_type* p){
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          super::erase(pg,n,p);
        }
      });
  }
#endif

  /* swap locks both tables exclusively (address-ordered bilock). */
  void swap(concurrent_table& x)
    noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  {
    auto lck=exclusive_access(*this,x);
    super::swap(x);
  }

  void clear()noexcept
  {
    auto lck=exclusive_access();
    super::clear();
  }

  /* Removes the element matching x (if any) and hands it, together with
   * the allocator, to ext before erasure.
   */
  template<typename Key,typename Extractor>
  BOOST_FORCEINLINE void extract(const Key& x,Extractor&& ext)
  {
    extract_if(
      x,[](const value_type&){return true;},std::forward<Extractor>(ext));
  }

  template<typename Key,typename F,typename Extractor>
  BOOST_FORCEINLINE void extract_if(const Key& x,F&& f,Extractor&& ext)
  {
    auto lck=shared_access();
    auto hash=this->hash_for(x);
    unprotected_internal_visit(
      group_exclusive{},x,this->position_for(hash),hash,
      [&,this](group_type* pg,unsigned int n,element_type* p)
      {
        if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
          ext(std::move(*p),this->al());
          super::erase(pg,n,p);
        }
      });
  }
0997
0998
  /* Moves every element of x that this table does not already contain into
   * *this, erasing it from x (erase_on_exit rolls the erasure back when
   * the emplace failed, i.e. the key was already present here). Both
   * tables are locked exclusively for the duration. Returns the number of
   * elements transferred.
   */
  template<typename Hash2,typename Pred2>
  size_type merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& x)
  {
    using merge_table_type=concurrent_table<TypePolicy,Hash2,Pred2,Allocator>;
    using super2=typename merge_table_type::super;

    /* suppress "unused typedef" warnings on compilers that emit them */
    boost::ignore_unused<super2>();

    auto lck=exclusive_access(*this,x);
    size_type s=super::size();
    x.super2::for_all_elements(
      [&,this](group_type* pg,unsigned int n,element_type* p){
        typename merge_table_type::erase_on_exit e{x,pg,n,p};
        if(!unprotected_emplace(type_policy::move(*p)))e.rollback();
      });
    return size_type{super::size()-s};
  }

  template<typename Hash2,typename Pred2>
  void merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}

  hasher hash_function()const
  {
    auto lck=shared_access();
    return super::hash_function();
  }

  key_equal key_eq()const
  {
    auto lck=shared_access();
    return super::key_eq();
  }

  /* count is 0 or 1 (unique keys): delegates to contains */
  template<typename Key>
  BOOST_FORCEINLINE std::size_t count(Key&& x)const
  {
    return (std::size_t)contains(std::forward<Key>(x));
  }

  template<typename Key>
  BOOST_FORCEINLINE bool contains(Key&& x)const
  {
    return visit(std::forward<Key>(x),[](const value_type&){})!=0;
  }

  std::size_t capacity()const noexcept
  {
    auto lck=shared_access();
    return super::capacity();
  }

  float load_factor()const noexcept
  {
    auto lck=shared_access();
    if(super::capacity()==0)return 0;
    else return float(unprotected_size())/
      float(super::capacity());
  }

  using super::max_load_factor;

  std::size_t max_load()const noexcept
  {
    auto lck=shared_access();
    return super::max_load();
  }

  /* Structural modifications take the table-wide exclusive lock. */
  void rehash(std::size_t n)
  {
    auto lck=exclusive_access();
    super::rehash(n);
  }

  void reserve(std::size_t n)
  {
    auto lck=exclusive_access();
    super::reserve(n);
  }

#if defined(BOOST_UNORDERED_ENABLE_STATS)
  using super::get_stats;
  using super::reset_stats;
#endif

  /* ADL hook for std-style erase_if(container,pred) */
  template<typename Predicate>
  friend std::size_t erase_if(concurrent_table& x,Predicate&& pr)
  {
    return x.erase_if(std::forward<Predicate>(pr));
  }

  /* Comparison locks both tables exclusively. */
  friend bool operator==(const concurrent_table& x,const concurrent_table& y)
  {
    auto lck=exclusive_access(x,y);
    return static_cast<const super&>(x)==static_cast<const super&>(y);
  }

  friend bool operator!=(const concurrent_table& x,const concurrent_table& y)
  {
    return !(x==y);
  }
1102
private:
template<typename,typename,typename,typename> friend class concurrent_table;

/* Locking machinery:
 * - mutexes: a multimutex of 128 rw_spinlocks protecting the table as a
 *   whole (shared mode for readers, all locked for exclusive access).
 * - shared_lock_guard: shared ownership of one spinlock.
 * - exclusive_lock_guard: exclusive ownership of the whole multimutex.
 * - exclusive_bilock_guard: exclusive ownership over two tables at once.
 * - group_*: per-group access guards and insert counter from group_access.
 * All guards are wrapped with reentrancy checking.
 */
using mutex_type=rw_spinlock;
using multimutex_type=multimutex<mutex_type,128>;
using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>;
using exclusive_lock_guard=reentrancy_checked<lock_guard<multimutex_type>>;
using exclusive_bilock_guard=
  reentrancy_bichecked<scoped_bilock<multimutex_type>>;
using group_shared_lock_guard=typename group_access::shared_lock_guard;
using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard;
using group_insert_counter_type=typename group_access::insert_counter_type;

/* Private copy/move constructors taking an already-held exclusive lock
 * on the source table; the public counterparts acquire the lock and
 * delegate here.
 */
concurrent_table(const concurrent_table& x,exclusive_lock_guard):
  super{x}{}
concurrent_table(concurrent_table&& x,exclusive_lock_guard):
  super{std::move(x)}{}
concurrent_table(
  const concurrent_table& x,const Allocator& al_,exclusive_lock_guard):
  super{x,al_}{}
concurrent_table(
  concurrent_table&& x,const Allocator& al_,exclusive_lock_guard):
  super{std::move(x),al_}{}
1126
/* Table-wide shared access: each thread is assigned one of the 128
 * spinlocks once (round-robin via the global thread_counter, cached in a
 * thread_local) and takes it in shared mode, spreading reader traffic
 * across the multimutex.
 */
inline shared_lock_guard shared_access()const
{
  thread_local auto id=(++thread_counter)%mutexes.size();

  return shared_lock_guard{this,mutexes[id]};
}

// Table-wide exclusive access: locks every spinlock in the multimutex.
inline exclusive_lock_guard exclusive_access()const
{
  return exclusive_lock_guard{this,mutexes};
}

/* Exclusive access over two tables simultaneously (used by operator==
 * and locked construction); deadlock avoidance is delegated to
 * scoped_bilock — see reentrancy_check.hpp / rw_spinlock.hpp.
 */
static inline exclusive_bilock_guard exclusive_access(
  const concurrent_table& x,const concurrent_table& y)
{
  return {&x,&y,x.mutexes,y.mutexes};
}

// Same, for tables that differ in hash/equality policies.
template<typename Hash2,typename Pred2>
static inline exclusive_bilock_guard exclusive_access(
  const concurrent_table& x,
  const concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& y)
{
  return {&x,&y,x.mutexes,y.mutexes};
}
1152
1153
1154
/* Tag types selecting the group-level locking mode used by visitation:
 * group_shared for const access, group_exclusive for mutable access.
 */
using group_shared=std::false_type;
using group_exclusive=std::true_type;

// Shared (reader) lock on the group at position pos.
inline group_shared_lock_guard access(group_shared,std::size_t pos)const
{
  return this->arrays.group_accesses()[pos].shared_access();
}

// Exclusive (writer) lock on the group at position pos.
inline group_exclusive_lock_guard access(
  group_exclusive,std::size_t pos)const
{
  return this->arrays.group_accesses()[pos].exclusive_access();
}

// Per-group counter bumped on insertion; used to detect concurrent
// insertions (see unprotected_norehash_emplace_and_visit).
inline group_insert_counter_type& insert_counter(std::size_t pos)const
{
  return this->arrays.group_accesses()[pos].insert_counter();
}
1173
1174
1175
1176
1177
1178
/* Reference type handed to visitation callbacks:
 * - shared access always yields const value_type&;
 * - exclusive access yields value_type&, except when key_type==value_type
 *   (set-like tables), where the stored value is the key itself and is
 *   therefore kept const.
 */
static inline const value_type&
cast_for(group_shared,value_type& x){return x;}

static inline typename std::conditional<
  std::is_same<key_type,value_type>::value,
  const value_type&,
  value_type&
>::type
cast_for(group_exclusive,value_type& x){return x;}
1188
/* RAII helper: erases the designated element through the base table on
 * scope exit unless rollback() was called (callers not visible in this
 * chunk).
 */
struct erase_on_exit
{
  erase_on_exit(
    concurrent_table& x_,
    group_type* pg_,unsigned int pos_,element_type* p_):
    x(x_),pg(pg_),pos(pos_),p(p_){}
  ~erase_on_exit(){if(!rollback_)x.super::erase(pg,pos,p);}

  void rollback(){rollback_=true;}

  concurrent_table &x;
  group_type *pg;         // group holding the element
  unsigned int pos;       // slot index within the group
  element_type *p;        // the element itself
  bool rollback_=false;   // true => destructor is a no-op
};
1205
1206 template<typename GroupAccessMode,typename Key,typename F>
1207 BOOST_FORCEINLINE std::size_t visit_impl(
1208 GroupAccessMode access_mode,const Key& x,F&& f)const
1209 {
1210 auto lck=shared_access();
1211 auto hash=this->hash_for(x);
1212 return unprotected_visit(
1213 access_mode,x,this->position_for(hash),hash,std::forward<F>(f));
1214 }
1215
/* Visits the elements matching the keys in [first,last), processed in
 * batches so unprotected_bulk_visit can pipeline hashing, prefetching and
 * probing. Returns the total number of elements visited.
 */
template<typename GroupAccessMode,typename FwdIterator,typename F>
BOOST_FORCEINLINE
std::size_t bulk_visit_impl(
  GroupAccessMode access_mode,FwdIterator first,FwdIterator last,F&& f)const
{
  auto lck=shared_access();
  std::size_t res=0;
  auto n=static_cast<std::size_t>(std::distance(first,last));
  while(n){
    // Take bulk_visit_size keys per batch; when fewer than twice that
    // remain, consume them all at once to avoid a tiny trailing batch.
    auto m=n<2*bulk_visit_size?n:bulk_visit_size;
    res+=unprotected_bulk_visit(access_mode,first,m,std::forward<F>(f));
    n-=m;
    std::advance(
      first,
      static_cast<
        typename std::iterator_traits<FwdIterator>::difference_type>(m));
  }
  return res;
}
1235
1236 template<typename GroupAccessMode,typename F>
1237 std::size_t visit_all_impl(GroupAccessMode access_mode,F&& f)const
1238 {
1239 auto lck=shared_access();
1240 std::size_t res=0;
1241 for_all_elements(access_mode,[&](element_type* p){
1242 f(cast_for(access_mode,type_policy::value_from(*p)));
1243 ++res;
1244 });
1245 return res;
1246 }
1247
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel overload: traverses all elements under the given execution
// policy while holding table-wide shared access.
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
void visit_all_impl(
  GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
{
  auto lck=shared_access();
  for_all_elements(
    access_mode,std::forward<ExecutionPolicy>(policy),
    [&](element_type* p){
      f(cast_for(access_mode,type_policy::value_from(*p)));
    });
}
#endif
1261
1262 template<typename GroupAccessMode,typename F>
1263 bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
1264 {
1265 auto lck=shared_access();
1266 return for_all_elements_while(access_mode,[&](element_type* p){
1267 return f(cast_for(access_mode,type_policy::value_from(*p)));
1268 });
1269 }
1270
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel overload of visit_while_impl (execution policy forwarded to
// the underlying parallel traversal).
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
bool visit_while_impl(
  GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
{
  auto lck=shared_access();
  return for_all_elements_while(
    access_mode,std::forward<ExecutionPolicy>(policy),
    [&](element_type* p){
      return f(cast_for(access_mode,type_policy::value_from(*p)));
    });
}
#endif
1284
/* Locates the element equivalent to x (if any) and passes it to f with
 * the access mode's constness applied. Returns 1 if visited, 0 if not
 * found. "Unprotected": callers (e.g. visit_impl) already hold
 * table-wide shared access.
 */
template<typename GroupAccessMode,typename Key,typename F>
BOOST_FORCEINLINE std::size_t unprotected_visit(
  GroupAccessMode access_mode,
  const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
{
  return unprotected_internal_visit(
    access_mode,x,pos0,hash,
    [&](group_type*,unsigned int,element_type* p)
    {f(cast_for(access_mode,type_policy::value_from(*p)));});
}
1295
1296 #if defined(BOOST_MSVC)
1297
1298 #pragma warning(push)
1299 #pragma warning(disable:4800)
1300 #endif
1301
/* Probes for an element equivalent to x starting at group position pos0.
 * Each candidate group is matched against the hash without locking; the
 * group lock (in the requested mode) is taken only while its elements are
 * inspected. On a key match, f(group,slot,element) is invoked and 1 is
 * returned; 0 means not found. Callers hold table-wide shared access.
 */
template<typename GroupAccessMode,typename Key,typename F>
BOOST_FORCEINLINE std::size_t unprotected_internal_visit(
  GroupAccessMode access_mode,
  const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
{
  BOOST_UNORDERED_STATS_COUNTER(num_cmps);
  prober pb(pos0);
  do{
    auto pos=pb.get();
    auto pg=this->arrays.groups()+pos;
    auto mask=pg->match(hash);  // candidate slots, computed lock-free
    if(mask){
      auto p=this->arrays.elements()+pos*N;
      BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
      auto lck=access(access_mode,pos);  // lock before touching elements
      do{
        auto n=unchecked_countr_zero(mask);
        // Re-check occupancy under the lock: the slot may have changed
        // between the unlocked match and acquiring the group lock.
        if(BOOST_LIKELY(pg->is_occupied(n))){
          BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
          if(BOOST_LIKELY(bool(this->pred()(x,this->key_from(p[n]))))){
            f(pg,n,p+n);
            BOOST_UNORDERED_ADD_STATS(
              this->cstats.successful_lookup,(pb.length(),num_cmps));
            return 1;
          }
        }
        mask&=mask-1;  // clear lowest bit: next candidate slot
      }while(mask);
    }
    // A group that never overflowed for this hash terminates the probe.
    if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
      BOOST_UNORDERED_ADD_STATS(
        this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
      return 0;
    }
  }
  while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask)));
  BOOST_UNORDERED_ADD_STATS(
    this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
  return 0;
}
1342
/* Batched lookup of m (< 2*bulk_visit_size) keys starting at first,
 * organized in three passes to hide memory latency:
 *   1) hash every key and prefetch its home group;
 *   2) match every key against its home group, prefetching the group's
 *      access structure and first candidate element;
 *   3) probe each key with the same lock/recheck protocol as
 *      unprotected_internal_visit, invoking f on each match.
 * Returns the number of keys whose element was visited. Callers hold
 * table-wide shared access.
 */
template<typename GroupAccessMode,typename FwdIterator,typename F>
BOOST_FORCEINLINE std::size_t unprotected_bulk_visit(
  GroupAccessMode access_mode,FwdIterator first,std::size_t m,F&& f)const
{
  BOOST_ASSERT(m<2*bulk_visit_size);

  std::size_t res=0,
  hashes[2*bulk_visit_size-1],
  positions[2*bulk_visit_size-1];
  int masks[2*bulk_visit_size-1];
  auto it=first;

  /* Pass 1: hash + prefetch home groups. */
  for(auto i=m;i--;++it){
    auto hash=hashes[i]=this->hash_for(*it);
    auto pos=positions[i]=this->position_for(hash);
    BOOST_UNORDERED_PREFETCH(this->arrays.groups()+pos);
  }

  /* Pass 2: match + prefetch access metadata and first candidate. */
  for(auto i=m;i--;){
    auto hash=hashes[i];
    auto pos=positions[i];
    auto mask=masks[i]=(this->arrays.groups()+pos)->match(hash);
    if(mask){
      BOOST_UNORDERED_PREFETCH(this->arrays.group_accesses()+pos);
      BOOST_UNORDERED_PREFETCH(
        this->arrays.elements()+pos*N+unchecked_countr_zero(mask));
    }
  }

  /* Pass 3: per-key probing. */
  it=first;
  for(auto i=m;i--;++it){
    BOOST_UNORDERED_STATS_COUNTER(num_cmps);
    auto pos=positions[i];
    prober pb(pos);
    auto pg=this->arrays.groups()+pos;
    auto mask=masks[i];
    element_type *p;
    if(!mask)goto post_mask;
    p=this->arrays.elements()+pos*N;
    for(;;){
      {
        auto lck=access(access_mode,pos);
        do{
          auto n=unchecked_countr_zero(mask);
          // Occupancy re-checked under the group lock.
          if(BOOST_LIKELY(pg->is_occupied(n))){
            BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
            if(bool(this->pred()(*it,this->key_from(p[n])))){
              f(cast_for(access_mode,type_policy::value_from(p[n])));
              ++res;
              BOOST_UNORDERED_ADD_STATS(
                this->cstats.successful_lookup,(pb.length(),num_cmps));
              goto next_key;
            }
          }
          mask&=mask-1;
        }while(mask);
      }
      post_mask:
      // Advance the probe sequence (lock-free) until another group
      // matches; stop at a non-overflowed group or sequence end.
      do{
        if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))||
           BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){
          BOOST_UNORDERED_ADD_STATS(
            this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
          goto next_key;
        }
        pos=pb.get();
        pg=this->arrays.groups()+pos;
        mask=pg->match(hashes[i]);
      }while(!mask);
      p=this->arrays.elements()+pos*N;
      BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
    }
    next_key:;
  }
  return res;
}
1419
1420 #if defined(BOOST_MSVC)
1421 #pragma warning(pop)
1422 #endif
1423
1424 std::size_t unprotected_size()const
1425 {
1426 std::size_t m=this->size_ctrl.ml;
1427 std::size_t s=this->size_ctrl.size;
1428 return s<=m?s:m;
1429 }
1430
// Constructs a value from args and inserts it unless an equivalent key
// already exists; returns true iff a new element was inserted.
template<typename... Args>
BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args)
{
  return construct_and_emplace_or_visit(
    group_shared{},[](const value_type&){},std::forward<Args>(args)...);
}

// Function object so mp11::tuple_apply can dispatch back into the member
// template construct_and_emplace_or_visit.
struct call_construct_and_emplace_or_visit
{
  template<typename... Args>
  BOOST_FORCEINLINE bool operator()(
    concurrent_table* this_,Args&&... args)const
  {
    return this_->construct_and_emplace_or_visit(
      std::forward<Args>(args)...);
  }
};

// "flast" variant: the visitation function arrives as the *last*
// argument; the argument tuple is rotated right to move it to the front
// before dispatching.
template<typename GroupAccessMode,typename... Args>
BOOST_FORCEINLINE bool construct_and_emplace_or_visit_flast(
  GroupAccessMode access_mode,Args&&... args)
{
  return mp11::tuple_apply(
    call_construct_and_emplace_or_visit{},
    std::tuple_cat(
      std::make_tuple(this,access_mode),
      tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
    )
  );
}
1461
// Function object so mp11::tuple_apply can dispatch back into the member
// template construct_and_emplace_and_visit.
struct call_construct_and_emplace_and_visit
{
  template<typename... Args>
  BOOST_FORCEINLINE bool operator()(
    concurrent_table* this_,Args&&... args)const
  {
    return this_->construct_and_emplace_and_visit(
      std::forward<Args>(args)...);
  }
};

// "flast" variant taking the two visitation functions as the *last two*
// arguments; the argument tuple is rotated right by 2 to move them to
// the front.
template<typename GroupAccessMode,typename... Args>
BOOST_FORCEINLINE bool construct_and_emplace_and_visit_flast(
  GroupAccessMode access_mode,Args&&... args)
{
  return mp11::tuple_apply(
    call_construct_and_emplace_and_visit{},
    std::tuple_cat(
      std::make_tuple(this,access_mode),
      tuple_rotate_right<2>(
        std::forward_as_tuple(std::forward<Args>(args)...))
    )
  );
}

// or_visit = and_visit with a no-op insert-visitation function (f runs
// only when an equivalent element already exists).
template<typename GroupAccessMode,typename F,typename... Args>
BOOST_FORCEINLINE bool construct_and_emplace_or_visit(
  GroupAccessMode access_mode,F&& f,Args&&... args)
{
  return construct_and_emplace_and_visit(
    access_mode,[](const value_type&){},std::forward<F>(f),
    std::forward<Args>(args)...);
}
1495
/* Constructs the value up front (alloc_cted_insert_type) and attempts a
 * no-rehash insert-or-visit under shared access. A negative result means
 * the table is at max load: the shared lock is released, the table
 * rehashed under exclusive access, and the insertion retried on the
 * out-of-line path. Returns true iff a new element was inserted.
 */
template<typename GroupAccessMode,typename F1,typename F2,typename... Args>
BOOST_FORCEINLINE bool construct_and_emplace_and_visit(
  GroupAccessMode access_mode,F1&& f1,F2&& f2,Args&&... args)
{
  auto lck=shared_access();

  alloc_cted_insert_type<type_policy,Allocator,Args...> x(
    this->al(),std::forward<Args>(args)...);
  int res=unprotected_norehash_emplace_and_visit(
    access_mode,std::forward<F1>(f1),std::forward<F2>(f2),
    type_policy::move(x.value()));
  if(BOOST_LIKELY(res>=0))return res!=0;

  // Must unlock before rehash_if_full acquires exclusive access.
  lck.unlock();

  rehash_if_full();
  return noinline_emplace_and_visit(
    access_mode,std::forward<F1>(f1),std::forward<F2>(f2),
    type_policy::move(x.value()));
}
1516
// Plain emplace: insert unless an equivalent key is present, no
// visitation on either path.
template<typename... Args>
BOOST_FORCEINLINE bool emplace_impl(Args&&... args)
{
  return emplace_or_visit_impl(
    group_shared{},[](const value_type&){},std::forward<Args>(args)...);
}

// Out-of-line retry entry points: marked BOOST_NOINLINE so the rare
// rehash-and-retry code does not bloat callers' inlined fast paths.
template<typename GroupAccessMode,typename F,typename... Args>
BOOST_NOINLINE bool noinline_emplace_or_visit(
  GroupAccessMode access_mode,F&& f,Args&&... args)
{
  return emplace_or_visit_impl(
    access_mode,std::forward<F>(f),std::forward<Args>(args)...);
}

template<typename GroupAccessMode,typename F1,typename F2,typename... Args>
BOOST_NOINLINE bool noinline_emplace_and_visit(
  GroupAccessMode access_mode,F1&& f1,F2&& f2,Args&&... args)
{
  return emplace_and_visit_impl(
    access_mode,std::forward<F1>(f1),std::forward<F2>(f2),
    std::forward<Args>(args)...);
}
1540
// Function object so mp11::tuple_apply can dispatch back into the member
// template emplace_or_visit_impl.
struct call_emplace_or_visit_impl
{
  template<typename... Args>
  BOOST_FORCEINLINE bool operator()(
    concurrent_table* this_,Args&&... args)const
  {
    return this_->emplace_or_visit_impl(std::forward<Args>(args)...);
  }
};

// "flast" variant: the visitation function arrives last and is rotated
// to the front of the argument tuple before dispatching.
template<typename GroupAccessMode,typename... Args>
BOOST_FORCEINLINE bool emplace_or_visit_flast(
  GroupAccessMode access_mode,Args&&... args)
{
  return mp11::tuple_apply(
    call_emplace_or_visit_impl{},
    std::tuple_cat(
      std::make_tuple(this,access_mode),
      tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
    )
  );
}
1563
// Function object so mp11::tuple_apply can dispatch back into the member
// template emplace_and_visit_impl.
struct call_emplace_and_visit_impl
{
  template<typename... Args>
  BOOST_FORCEINLINE bool operator()(
    concurrent_table* this_,Args&&... args)const
  {
    return this_->emplace_and_visit_impl(std::forward<Args>(args)...);
  }
};

// "flast" variant: the two visitation functions arrive last and are
// rotated (by 2) to the front of the argument tuple before dispatching.
template<typename GroupAccessMode,typename... Args>
BOOST_FORCEINLINE bool emplace_and_visit_flast(
  GroupAccessMode access_mode,Args&&... args)
{
  return mp11::tuple_apply(
    call_emplace_and_visit_impl{},
    std::tuple_cat(
      std::make_tuple(this,access_mode),
      tuple_rotate_right<2>(
        std::forward_as_tuple(std::forward<Args>(args)...))
    )
  );
}
1587
// or_visit = and_visit with a no-op insert-visitation function (f runs
// only when an equivalent element already exists).
template<typename GroupAccessMode,typename F,typename... Args>
BOOST_FORCEINLINE bool emplace_or_visit_impl(
  GroupAccessMode access_mode,F&& f,Args&&... args)
{
  return emplace_and_visit_impl(
    access_mode,[](const value_type&){},std::forward<F>(f),
    std::forward<Args>(args)...);
}

/* Insert-or-visit with retry: attempts a no-rehash insertion under
 * table-wide shared access; a negative result means the table hit max
 * load, so the shared lock is dropped (scope exit), the table rehashed,
 * and the whole operation retried. Returns true iff a new element was
 * inserted.
 */
template<typename GroupAccessMode,typename F1,typename F2,typename... Args>
BOOST_FORCEINLINE bool emplace_and_visit_impl(
  GroupAccessMode access_mode,F1&& f1,F2&& f2,Args&&... args)
{
  for(;;){
    {
      auto lck=shared_access();
      int res=unprotected_norehash_emplace_and_visit(
        access_mode,std::forward<F1>(f1),std::forward<F2>(f2),
        std::forward<Args>(args)...);
      if(BOOST_LIKELY(res>=0))return res!=0;
    }
    rehash_if_full();
  }
}
1612
1613 template<typename... Args>
1614 BOOST_FORCEINLINE bool unprotected_emplace(Args&&... args)
1615 {
1616 const auto &k=this->key_from(std::forward<Args>(args)...);
1617 auto hash=this->hash_for(k);
1618 auto pos0=this->position_for(hash);
1619
1620 if(this->find(k,pos0,hash))return false;
1621
1622 if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
1623 this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...);
1624 }
1625 else{
1626 this->unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...);
1627 }
1628 return true;
1629 }
1630
// or-visit form of the primitive below: the insert-visitation is a
// no-op, so f runs only on an already-present equivalent element.
// Returns the same -1/0/1 code as unprotected_norehash_emplace_and_visit.
template<typename GroupAccessMode,typename F,typename... Args>
BOOST_FORCEINLINE int
unprotected_norehash_emplace_or_visit(
  GroupAccessMode access_mode,F&& f,Args&&... args)
{
  return unprotected_norehash_emplace_and_visit(
    access_mode,[&](const value_type&){},
    std::forward<F>(f),std::forward<Args>(args)...);
}
1640
/* RAII size reservation: optimistically increments size_ctrl.size on
 * construction and rolls the increment back on destruction unless
 * commit() was called. succeeded() reports whether the incremented size
 * still respects the max-load limit.
 */
struct reserve_size
{
  reserve_size(concurrent_table& x_):x(x_)
  {
    size_=++x.size_ctrl.size;
  }

  ~reserve_size()
  {
    if(!commit_)--x.size_ctrl.size;
  }

  bool succeeded()const{return size_<=x.size_ctrl.ml;}

  void commit(){commit_=true;}

  concurrent_table &x;
  std::size_t size_;      // size_ctrl.size right after our increment
  bool commit_=false;     // true => keep the reservation on destruction
};

/* RAII slot reservation: marks slot pos of group pg with the hash on
 * construction and clears the mark on destruction unless committed.
 */
struct reserve_slot
{
  reserve_slot(group_type* pg_,std::size_t pos_,std::size_t hash):
    pg{pg_},pos{pos_}
  {
    pg->set(pos,hash);
  }

  ~reserve_slot()
  {
    if(!commit_)pg->reset(pos);
  }

  void commit(){commit_=true;}

  group_type *pg;
  std::size_t pos;
  bool commit_=false;
};
1681
/* Core no-rehash insert-or-visit primitive. Returns:
 *   1  a new element was inserted (f1 invoked on it),
 *   0  an equivalent element already existed (f2 invoked on it),
 *  -1  insertion would exceed max load; the caller must rehash and retry.
 * Callers hold table-wide shared access; per-group locks are taken here.
 */
template<typename GroupAccessMode,typename F1,typename F2,typename... Args>
BOOST_FORCEINLINE int
unprotected_norehash_emplace_and_visit(
  GroupAccessMode access_mode,F1&& f1,F2&& f2,Args&&... args)
{
  const auto &k=this->key_from(std::forward<Args>(args)...);
  auto hash=this->hash_for(k);
  auto pos0=this->position_for(hash);

  for(;;){
  startover:
    // Snapshot the home group's insert counter before the lookup: any
    // concurrent insertion in this probe sequence bumps it.
    boost::uint32_t counter=insert_counter(pos0);
    if(unprotected_visit(
      access_mode,k,pos0,hash,std::forward<F2>(f2)))return 0;

    reserve_size rsize(*this);
    if(BOOST_LIKELY(rsize.succeeded())){
      for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
        auto pos=pb.get();
        auto pg=this->arrays.groups()+pos;
        auto lck=access(group_exclusive{},pos);
        auto mask=pg->match_available();
        if(BOOST_LIKELY(mask!=0)){
          auto n=unchecked_countr_zero(mask);
          reserve_slot rslot{pg,n,hash};
          // If the counter moved, another thread may have inserted an
          // equivalent key after our lookup: roll back (via the RAII
          // guards) and redo from the top. The post-increment also
          // publishes our own impending insertion to other threads.
          if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){

            goto startover;
          }
          auto p=this->arrays.elements()+pos*N+n;
          this->construct_element(p,std::forward<Args>(args)...);
          rslot.commit();
          rsize.commit();
          f1(cast_for(group_exclusive{},type_policy::value_from(*p)));
          BOOST_UNORDERED_ADD_STATS(this->cstats.insertion,(pb.length()));
          return 1;
        }
        // Group is full: record the overflow for this hash and probe on.
        pg->mark_overflow(hash);
      }
    }
    else return -1;
  }
}
1725
1726 void rehash_if_full()
1727 {
1728 auto lck=exclusive_access();
1729 if(this->size_ctrl.size==this->size_ctrl.ml){
1730 this->unchecked_rehash_for_growth();
1731 }
1732 }
1733
// Adapter: lifts a one-argument callback f(element_type*) to the
// three-argument (group,slot,element) traversal below.
template<typename GroupAccessMode,typename F>
auto for_all_elements(GroupAccessMode access_mode,F f)const
  ->decltype(f(nullptr),void())
{
  for_all_elements(
    access_mode,[&](group_type*,unsigned int,element_type* p){f(p);});
}

// Full traversal expressed as a while-traversal that never stops early.
template<typename GroupAccessMode,typename F>
auto for_all_elements(GroupAccessMode access_mode,F f)const
  ->decltype(f(nullptr,0,nullptr),void())
{
  for_all_elements_while(
    access_mode,[&](group_type* pg,unsigned int n,element_type* p)
    {f(pg,n,p);return true;});
}

// Adapter: one-argument predicate form of for_all_elements_while.
template<typename GroupAccessMode,typename F>
auto for_all_elements_while(GroupAccessMode access_mode,F f)const
  ->decltype(f(nullptr),bool())
{
  return for_all_elements_while(
    access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
}
1758
/* Sequential traversal: locks each group in the requested access mode
 * and invokes f(group,slot,element) for every occupied slot; returns
 * false as soon as f does. match_really_occupied(pg,last) yields the
 * occupied-slot mask (presumably excluding the end sentinel in the final
 * group — see core.hpp for its definition).
 */
template<typename GroupAccessMode,typename F>
auto for_all_elements_while(GroupAccessMode access_mode,F f)const
  ->decltype(f(nullptr,0,nullptr),bool())
{
  auto p=this->arrays.elements();
  if(p){  // a null elements array means zero capacity: nothing to visit
    for(auto pg=this->arrays.groups(),last=pg+this->arrays.groups_size_mask+1;
        pg!=last;++pg,p+=N){
      auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups()));
      auto mask=this->match_really_occupied(pg,last);
      while(mask){
        auto n=unchecked_countr_zero(mask);
        if(!f(pg,n,p+n))return false;
        mask&=mask-1;
      }
    }
  }
  return true;
}
1778
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel counterparts of the traversals above: groups are distributed
// across threads by std::for_each/std::all_of under the given execution
// policy; each group is still locked individually in the requested mode.

// Adapter: one-argument callback form.
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
auto for_all_elements(
  GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  ->decltype(f(nullptr),void())
{
  for_all_elements(
    access_mode,std::forward<ExecutionPolicy>(policy),
    [&](group_type*,unsigned int,element_type* p){f(p);});
}

template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
auto for_all_elements(
  GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  ->decltype(f(nullptr,0,nullptr),void())
{
  if(!this->arrays.elements())return;
  auto first=this->arrays.groups(),
  last=first+this->arrays.groups_size_mask+1;
  std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
    [&,this](group_type& g){
      auto pos=static_cast<std::size_t>(&g-first);
      auto p=this->arrays.elements()+pos*N;
      auto lck=access(access_mode,pos);
      auto mask=this->match_really_occupied(&g,last);
      while(mask){
        auto n=unchecked_countr_zero(mask);
        f(&g,n,p+n);
        mask&=mask-1;
      }
    }
  );
}

// Note: with a parallel policy std::all_of need not make other threads
// stop early; only each group's own sub-loop exits at the first false.
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
bool for_all_elements_while(
  GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
{
  if(!this->arrays.elements())return true;
  auto first=this->arrays.groups(),
  last=first+this->arrays.groups_size_mask+1;
  return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
    [&,this](group_type& g){
      auto pos=static_cast<std::size_t>(&g-first);
      auto p=this->arrays.elements()+pos*N;
      auto lck=access(access_mode,pos);
      auto mask=this->match_really_occupied(&g,last);
      while(mask){
        auto n=unchecked_countr_zero(mask);
        if(!f(p+n))return false;
        mask&=mask-1;
      }
      return true;
    }
  );
}
#endif
1836
friend class boost::serialization::access;

// Boost.Serialization hook: splits into the save()/load() members below.
template<typename Archive>
void serialize(Archive& ar,unsigned int version)
{
  core::split_member(ar,*this,version);
}

// Dispatches on set-like (key_type==value_type) vs map-like storage.
template<typename Archive>
void save(Archive& ar,unsigned int version)const
{
  save(
    ar,version,
    std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
}
1852
/* Set-like save: under exclusive access (so the table cannot change
 * mid-save), writes the element count and value serialization version,
 * then each element with its construct data.
 */
template<typename Archive>
void save(Archive& ar,unsigned int,std::true_type )const
{
  auto lck=exclusive_access();
  const std::size_t s=super::size();
  const serialization_version<value_type> value_version;

  ar<<core::make_nvp("count",s);
  ar<<core::make_nvp("value_version",value_version);

  super::for_all_elements([&,this](element_type* p){
    auto& x=type_policy::value_from(*p);
    core::save_construct_data_adl(ar,std::addressof(x),value_version);
    ar<<serialization::make_nvp("item",x);
  });
}
1869
/* Map-like save: writes the element count plus separate serialization
 * versions for key and mapped types, then each element as two fields.
 */
template<typename Archive>
void save(Archive& ar,unsigned int,std::false_type )const
{
  using raw_key_type=typename std::remove_const<key_type>::type;
  using raw_mapped_type=typename std::remove_const<
    typename TypePolicy::mapped_type>::type;

  auto lck=exclusive_access();
  const std::size_t s=super::size();
  const serialization_version<raw_key_type> key_version;
  const serialization_version<raw_mapped_type> mapped_version;

  ar<<core::make_nvp("count",s);
  ar<<core::make_nvp("key_version",key_version);
  ar<<core::make_nvp("mapped_version",mapped_version);

  super::for_all_elements([&,this](element_type* p){
    /* Key and mapped value are emitted as separate "key"/"mapped" fields,
     * each with its own construct data, so the matching load() can
     * reconstruct them independently (presumably because value_type's
     * first member is const and cannot be loaded in place as a pair —
     * confirm against the non-concurrent table's serialization code).
     */
    auto& x=type_policy::value_from(*p);
    core::save_construct_data_adl(
      ar,std::addressof(x.first),key_version);
    ar<<serialization::make_nvp("key",x.first);
    core::save_construct_data_adl(
      ar,std::addressof(x.second),mapped_version);
    ar<<serialization::make_nvp("mapped",x.second);
  });
}
1902
// Dispatches load on set-like vs map-like storage.
template<typename Archive>
void load(Archive& ar,unsigned int version)
{
  load(
    ar,version,
    std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
}

/* Set-like load: reads count and value version, clears and pre-reserves
 * the table, then deserializes and inserts each element. A duplicate key
 * in the archive is treated as corruption (bad_archive_exception).
 * reset_object_address informs the archive of each element's final
 * in-table address for pointer tracking.
 */
template<typename Archive>
void load(Archive& ar,unsigned int,std::true_type )
{
  auto lck=exclusive_access();
  std::size_t s;
  serialization_version<value_type> value_version;

  ar>>core::make_nvp("count",s);
  ar>>core::make_nvp("value_version",value_version);

  super::clear();
  super::reserve(s);

  for(std::size_t n=0;n<s;++n){
    archive_constructed<value_type> value("item",ar,value_version);
    auto& x=value.get();
    auto hash=this->hash_for(x);
    auto pos0=this->position_for(hash);

    if(this->find(x,pos0,hash))throw_exception(bad_archive_exception());
    auto loc=this->unchecked_emplace_at(pos0,hash,std::move(x));
    ar.reset_object_address(
      std::addressof(type_policy::value_from(*loc.p)),std::addressof(x));
  }
}
1936
/* Map-like load, mirroring save(...,std::false_type): key and mapped
 * value are deserialized separately, duplicate keys raise
 * bad_archive_exception, and reset_object_address republishes the final
 * in-table addresses of both parts for the archive's pointer tracking.
 */
template<typename Archive>
void load(Archive& ar,unsigned int,std::false_type )
{
  using raw_key_type=typename std::remove_const<key_type>::type;
  using raw_mapped_type=typename std::remove_const<
    typename type_policy::mapped_type>::type;

  auto lck=exclusive_access();
  std::size_t s;
  serialization_version<raw_key_type> key_version;
  serialization_version<raw_mapped_type> mapped_version;

  ar>>core::make_nvp("count",s);
  ar>>core::make_nvp("key_version",key_version);
  ar>>core::make_nvp("mapped_version",mapped_version);

  super::clear();
  super::reserve(s);

  for(std::size_t n=0;n<s;++n){
    archive_constructed<raw_key_type> key("key",ar,key_version);
    archive_constructed<raw_mapped_type> mapped("mapped",ar,mapped_version);
    auto& k=key.get();
    auto& m=mapped.get();
    auto hash=this->hash_for(k);
    auto pos0=this->position_for(hash);

    if(this->find(k,pos0,hash))throw_exception(bad_archive_exception());
    auto loc=this->unchecked_emplace_at(pos0,hash,std::move(k),std::move(m));
    ar.reset_object_address(
      std::addressof(type_policy::value_from(*loc.p).first),
      std::addressof(k));
    ar.reset_object_address(
      std::addressof(type_policy::value_from(*loc.p).second),
      std::addressof(m));
  }
}
1974
// Global round-robin counter used by shared_access() to assign each
// thread a spinlock index (defined out of line below).
static std::atomic<std::size_t> thread_counter;
// Table-wide spinlock array; mutable so const operations can lock.
mutable multimutex_type mutexes;
1977 };
1978
// Out-of-line definition (zero-initialized) of the static thread counter.
template<typename T,typename H,typename P,typename A>
std::atomic<std::size_t> concurrent_table<T,H,P,A>::thread_counter={};
1981
1982 #if defined(BOOST_MSVC)
1983 #pragma warning(pop)
1984 #endif
1985
1986 #include <boost/unordered/detail/foa/restore_wshadow.hpp>
1987
1988 }
1989 }
1990 }
1991 }
1992
1993 #endif