File indexing completed on 2025-12-18 10:24:16
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #ifndef __TBB_detail__concurrent_unordered_base_H
0018 #define __TBB_detail__concurrent_unordered_base_H
0019
0020 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H)
0021 #error Do not #include this internal file directly; use public TBB headers instead.
0022 #endif
0023
0024 #include "_range_common.h"
0025 #include "_containers_helpers.h"
0026 #include "_segment_table.h"
0027 #include "_hash_compare.h"
0028 #include "_allocator_traits.h"
0029 #include "_node_handle.h"
0030 #include "_assert.h"
0031 #include "_utils.h"
0032 #include "_exception.h"
0033 #include <iterator>
0034 #include <utility>
0035 #include <functional>
0036 #include <initializer_list>
0037 #include <atomic>
0038 #include <type_traits>
0039 #include <memory>
0040 #include <algorithm>
0041
0042 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
0043 #pragma warning(push)
0044 #pragma warning(disable: 4127)
0045 #endif
0046
0047 namespace tbb {
0048 namespace detail {
0049 namespace d2 {
0050
0051 template <typename Traits>
0052 class concurrent_unordered_base;
0053
// Forward iterator over the split-ordered list that transparently skips
// dummy (bucket sentinel) nodes.  Container is the owning table type and
// Value is either value_type or const value_type (for const_iterator).
template<typename Container, typename Value>
class solist_iterator {
private:
    using node_ptr = typename Container::value_node_ptr;
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template<typename M, typename V>
    friend class solist_iterator;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<typename M, typename T, typename U>
    friend bool operator==( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
public:
    using value_type = Value;
    using difference_type = typename Container::difference_type;
    using pointer = value_type*;
    using reference = value_type&;
    using iterator_category = std::forward_iterator_tag;

    // A default-constructed iterator is the end iterator (null node).
    solist_iterator() : my_node_ptr(nullptr) {}

    // Converting constructor/assignment: enables iterator -> const_iterator.
    solist_iterator( const solist_iterator<Container, typename Container::value_type>& other )
        : my_node_ptr(other.my_node_ptr) {}

    solist_iterator& operator=( const solist_iterator<Container, typename Container::value_type>& other ) {
        my_node_ptr = other.my_node_ptr;
        return *this;
    }

    reference operator*() const {
        return my_node_ptr->value();
    }

    pointer operator->() const {
        return my_node_ptr->storage();
    }

    // Advance to the next real (non-dummy) node; stops at nullptr (end).
    solist_iterator& operator++() {
        auto candidate = my_node_ptr->next();
        while (candidate != nullptr && candidate->is_dummy()) {
            candidate = candidate->next();
        }
        my_node_ptr = static_cast<node_ptr>(candidate);
        return *this;
    }

    solist_iterator operator++(int) {
        solist_iterator snapshot(*this);
        this->operator++();
        return snapshot;
    }

private:
    // Containers and friends create iterators directly from nodes.
    solist_iterator( node_ptr pnode ) : my_node_ptr(pnode) {}

    node_ptr get_node_ptr() const { return my_node_ptr; }

    // Current node; nullptr represents the end iterator.
    node_ptr my_node_ptr;
};
0114
0115 template<typename Solist, typename T, typename U>
0116 bool operator==( const solist_iterator<Solist, T>& i, const solist_iterator<Solist, U>& j ) {
0117 return i.my_node_ptr == j.my_node_ptr;
0118 }
0119
0120 template<typename Solist, typename T, typename U>
0121 bool operator!=( const solist_iterator<Solist, T>& i, const solist_iterator<Solist, U>& j ) {
0122 return i.my_node_ptr != j.my_node_ptr;
0123 }
0124
// A node of the split-ordered list.  Holds only the split-order key and the
// atomic link to the next node; dummy (bucket sentinel) nodes are exactly of
// this type, while real elements use the derived value_node.
template <typename SokeyType>
class list_node {
public:
    using node_ptr = list_node*;
    using sokey_type = SokeyType;

    list_node(sokey_type key) : my_next(nullptr), my_order_key(key) {}

    // Re-assign the order key (used when an existing node is (re)inserted).
    void init( sokey_type key ) {
        my_order_key = key;
    }

    sokey_type order_key() const {
        return my_order_key;
    }

    // Dummy (sentinel) nodes carry an even order key; regular value nodes
    // have the least significant bit set.
    // Fix: marked const — the method only reads my_order_key.
    bool is_dummy() const {
        return (my_order_key & 0x1) == 0;
    }

    node_ptr next() const {
        return my_next.load(std::memory_order_acquire);
    }

    void set_next( node_ptr next_node ) {
        my_next.store(next_node, std::memory_order_release);
    }

    // Atomically link new_next after this node iff the current successor is
    // still expected_next; returns true on success (seq_cst CAS).
    bool try_set_next( node_ptr expected_next, node_ptr new_next ) {
        return my_next.compare_exchange_strong(expected_next, new_next);
    }

private:
    std::atomic<node_ptr> my_next;
    sokey_type my_order_key;
};
0162
0163 template <typename ValueType, typename SokeyType>
0164 class value_node : public list_node<SokeyType>
0165 {
0166 public:
0167 using base_type = list_node<SokeyType>;
0168 using sokey_type = typename base_type::sokey_type;
0169 using value_type = ValueType;
0170
0171 value_node( sokey_type ord_key ) : base_type(ord_key) {}
0172 ~value_node() {}
0173 value_type* storage() {
0174 return &my_value;
0175 }
0176
0177 value_type& value() {
0178 return *storage();
0179 }
0180
0181 private:
0182 union {
0183 value_type my_value;
0184 };
0185 };
0186
// Common implementation of the concurrent unordered containers: a hash table
// built from a single split-ordered list plus a lazily grown segmented array
// of bucket pointers.  Traits supplies key/value/allocator/hash-compare types
// and the allow_multimapping flag.
template <typename Traits>
class concurrent_unordered_base {
    using self_type = concurrent_unordered_base<Traits>;
    using traits_type = Traits;
    using hash_compare_type = typename traits_type::hash_compare_type;
    class unordered_segment_table;
public:
    using value_type = typename traits_type::value_type;
    using key_type = typename traits_type::key_type;
    using allocator_type = typename traits_type::allocator_type;

private:
    using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;

    static_assert(std::is_same<typename allocator_traits_type::value_type, value_type>::value,
                  "value_type of the container must be the same as its allocator");
    // Split-order key type; the LSB distinguishes dummy nodes (0) from
    // regular value nodes (1) — see list_node::is_dummy.
    using sokey_type = std::size_t;

public:
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;

    using iterator = solist_iterator<self_type, value_type>;
    using const_iterator = solist_iterator<self_type, const value_type>;
    // Buckets are contiguous sub-ranges of the one list, so local iterators
    // are the same type as whole-container iterators.
    using local_iterator = iterator;
    using const_local_iterator = const_iterator;

    using reference = value_type&;
    using const_reference = const value_type&;
    using pointer = typename allocator_traits_type::pointer;
    using const_pointer = typename allocator_traits_type::const_pointer;

    using hasher = typename hash_compare_type::hasher;
    using key_equal = typename hash_compare_type::key_equal;

private:
    using list_node_type = list_node<sokey_type>;
    using value_node_type = value_node<value_type, sokey_type>;
    using node_ptr = list_node_type*;
    using value_node_ptr = value_node_type*;

    using value_node_allocator_type = typename allocator_traits_type::template rebind_alloc<value_node_type>;
    using node_allocator_type = typename allocator_traits_type::template rebind_alloc<list_node_type>;

    using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>;
    using value_node_allocator_traits = tbb::detail::allocator_traits<value_node_allocator_type>;

    // Smallest power of two not less than bucket_count (maps 0 to 1).
    static constexpr size_type round_up_to_power_of_two( size_type bucket_count ) {
        return size_type(1) << size_type(tbb::detail::log2(uintptr_t(bucket_count == 0 ? 1 : bucket_count) * 2 - 1));
    }

    // SFINAE gate: enables heterogeneous-lookup overloads only when the
    // hash/equality pair declares a transparent key_equal.
    template <typename T>
    using is_transparent = dependent_bool<has_transparent_key_equal<key_type, hasher, key_equal>, T>;
public:
    using node_type = d1::node_handle<key_type, value_type, value_node_type, allocator_type>;
0242
    // Primary constructor — every other constructor delegates here.
    // bucket_count is rounded up to a power of two; the embedded head dummy
    // node receives order key 0 (the smallest split-order key).
    explicit concurrent_unordered_base( size_type bucket_count, const hasher& hash = hasher(),
                                        const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() )
        : my_size(0),
          my_bucket_count(round_up_to_power_of_two(bucket_count)),
          my_max_load_factor(float(initial_max_load_factor)),
          my_hash_compare(hash, equal),
          my_head(sokey_type(0)),
          my_segments(alloc) {}

    concurrent_unordered_base() : concurrent_unordered_base(initial_bucket_count) {}

    concurrent_unordered_base( size_type bucket_count, const allocator_type& alloc )
        : concurrent_unordered_base(bucket_count, hasher(), key_equal(), alloc) {}

    concurrent_unordered_base( size_type bucket_count, const hasher& hash, const allocator_type& alloc )
        : concurrent_unordered_base(bucket_count, hash, key_equal(), alloc) {}

    explicit concurrent_unordered_base( const allocator_type& alloc )
        : concurrent_unordered_base(initial_bucket_count, hasher(), key_equal(), alloc) {}

    // Range constructors: build an empty table, then insert [first, last).
    template <typename InputIterator>
    concurrent_unordered_base( InputIterator first, InputIterator last,
                               size_type bucket_count = initial_bucket_count, const hasher& hash = hasher(),
                               const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() )
        : concurrent_unordered_base(bucket_count, hash, equal, alloc)
    {
        insert(first, last);
    }

    template <typename InputIterator>
    concurrent_unordered_base( InputIterator first, InputIterator last,
                               size_type bucket_count, const allocator_type& alloc )
        : concurrent_unordered_base(first, last, bucket_count, hasher(), key_equal(), alloc) {}

    template <typename InputIterator>
    concurrent_unordered_base( InputIterator first, InputIterator last,
                               size_type bucket_count, const hasher& hash, const allocator_type& alloc )
        : concurrent_unordered_base(first, last, bucket_count, hash, key_equal(), alloc) {}
0281
    // Copy constructor: copies the scalar state in the initializer list, then
    // deep-copies the node list via internal_copy.  If that throws, the
    // partially built list is torn down with clear() before rethrowing.
    concurrent_unordered_base( const concurrent_unordered_base& other )
        : my_size(other.my_size.load(std::memory_order_relaxed)),
          my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
          my_max_load_factor(other.my_max_load_factor),
          my_hash_compare(other.my_hash_compare),
          my_head(other.my_head.order_key()),
          my_segments(other.my_segments)
    {
        try_call( [&] {
            internal_copy(other);
        } ).on_exception( [&] {
            clear();
        });
    }

    // Allocator-extended copy constructor: same as above, but the segment
    // table (and therefore all node allocations) uses the provided allocator.
    concurrent_unordered_base( const concurrent_unordered_base& other, const allocator_type& alloc )
        : my_size(other.my_size.load(std::memory_order_relaxed)),
          my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
          my_max_load_factor(other.my_max_load_factor),
          my_hash_compare(other.my_hash_compare),
          my_head(other.my_head.order_key()),
          my_segments(other.my_segments, alloc)
    {
        try_call( [&] {
            internal_copy(other);
        } ).on_exception( [&] {
            clear();
        });
    }
0311
    // Move constructor: steals the segment table, then transfers the node
    // list from other via move_content; other is left empty but valid.
    concurrent_unordered_base( concurrent_unordered_base&& other )
        : my_size(other.my_size.load(std::memory_order_relaxed)),
          my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
          my_max_load_factor(std::move(other.my_max_load_factor)),
          my_hash_compare(std::move(other.my_hash_compare)),
          my_head(other.my_head.order_key()),
          my_segments(std::move(other.my_segments))
    {
        move_content(std::move(other));
    }

    // Allocator-extended move constructor: when the allocators may compare
    // unequal, the content cannot simply be relinked, so the helper
    // dispatches on allocator_traits::is_always_equal.
    concurrent_unordered_base( concurrent_unordered_base&& other, const allocator_type& alloc )
        : my_size(other.my_size.load(std::memory_order_relaxed)),
          my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
          my_max_load_factor(std::move(other.my_max_load_factor)),
          my_hash_compare(std::move(other.my_hash_compare)),
          my_head(other.my_head.order_key()),
          my_segments(std::move(other.my_segments), alloc)
    {
        using is_always_equal = typename allocator_traits_type::is_always_equal;
        internal_move_construct_with_allocator(std::move(other), alloc, is_always_equal());
    }
0334
    // Initializer-list constructors: forward to the range constructor.
    concurrent_unordered_base( std::initializer_list<value_type> init,
                               size_type bucket_count = initial_bucket_count,
                               const hasher& hash = hasher(), const key_equal& equal = key_equal(),
                               const allocator_type& alloc = allocator_type() )
        : concurrent_unordered_base(init.begin(), init.end(), bucket_count, hash, equal, alloc) {}

    concurrent_unordered_base( std::initializer_list<value_type> init,
                               size_type bucket_count, const allocator_type& alloc )
        : concurrent_unordered_base(init, bucket_count, hasher(), key_equal(), alloc) {}

    concurrent_unordered_base( std::initializer_list<value_type> init,
                               size_type bucket_count, const hasher& hash, const allocator_type& alloc )
        : concurrent_unordered_base(init, bucket_count, hash, key_equal(), alloc) {}

    // Destroys every node (dummy and value) and releases the segment table.
    ~concurrent_unordered_base() {
        internal_clear();
    }
0352
    // Copy assignment: clears this container, copies the scalar state, then
    // deep-copies other's nodes.  Allocator propagation (POCCA) is handled by
    // the segment table's own copy assignment.
    concurrent_unordered_base& operator=( const concurrent_unordered_base& other ) {
        if (this != &other) {
            clear();
            my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
            my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
            my_max_load_factor = other.my_max_load_factor;
            my_hash_compare = other.my_hash_compare;
            my_segments = other.my_segments;
            internal_copy(other);
        }
        return *this;
    }

    // Move assignment: noexcept iff hasher/key_equal moves and the segment
    // allocator guarantee it (see unordered_segment_table).  When the
    // allocator neither propagates (POCMA) nor is always-equal, the helper
    // must move element-by-element instead of relinking.
    concurrent_unordered_base& operator=( concurrent_unordered_base&& other ) noexcept(unordered_segment_table::is_noexcept_assignment) {
        if (this != &other) {
            clear();
            my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
            my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
            my_max_load_factor = std::move(other.my_max_load_factor);
            my_hash_compare = std::move(other.my_hash_compare);
            my_segments = std::move(other.my_segments);

            using pocma_type = typename allocator_traits_type::propagate_on_container_move_assignment;
            using is_always_equal = typename allocator_traits_type::is_always_equal;
            internal_move_assign(std::move(other), tbb::detail::disjunction<pocma_type, is_always_equal>());
        }
        return *this;
    }

    // Replaces the contents with the initializer list's elements.
    concurrent_unordered_base& operator=( std::initializer_list<value_type> init ) {
        clear();
        insert(init);
        return *this;
    }

    // Swap; dispatches on POCS/always-equal so non-propagating, unequal
    // allocators are handled by the element-wise path inside internal_swap.
    void swap( concurrent_unordered_base& other ) noexcept(unordered_segment_table::is_noexcept_swap) {
        if (this != &other) {
            using pocs_type = typename allocator_traits_type::propagate_on_container_swap;
            using is_always_equal = typename allocator_traits_type::is_always_equal;
            internal_swap(other, tbb::detail::disjunction<pocs_type, is_always_equal>());
        }
    }
0395
    allocator_type get_allocator() const noexcept { return my_segments.get_allocator(); }

    // begin() starts at the first real node after the embedded head dummy;
    // the const overloads cast away const because first_value_node takes a
    // mutable node pointer (no mutation is performed).
    iterator begin() noexcept { return iterator(first_value_node(&my_head)); }
    const_iterator begin() const noexcept { return const_iterator(first_value_node(const_cast<node_ptr>(&my_head))); }
    const_iterator cbegin() const noexcept { return const_iterator(first_value_node(const_cast<node_ptr>(&my_head))); }

    // The end iterator is represented by a null node pointer.
    iterator end() noexcept { return iterator(nullptr); }
    const_iterator end() const noexcept { return const_iterator(nullptr); }
    const_iterator cend() const noexcept { return const_iterator(nullptr); }

    __TBB_nodiscard bool empty() const noexcept { return size() == 0; }
    // Relaxed load: size is a monotonic counter, callers get a snapshot.
    size_type size() const noexcept { return my_size.load(std::memory_order_relaxed); }
    size_type max_size() const noexcept { return allocator_traits_type::max_size(get_allocator()); }

    void clear() noexcept {
        internal_clear();
    }
0413
    // Single-element insertion; returns {position, inserted}.
    std::pair<iterator, bool> insert( const value_type& value ) {
        return internal_insert_value(value);
    }

    std::pair<iterator, bool> insert( value_type&& value ) {
        return internal_insert_value(std::move(value));
    }

    // Hinted overloads: the hint is ignored, a full insertion is performed.
    iterator insert( const_iterator, const value_type& value ) {
        return insert(value).first;
    }

    iterator insert( const_iterator, value_type&& value ) {
        return insert(std::move(value)).first;
    }

    // Inserts each element of [first, last) individually.
    template <typename InputIterator>
    void insert( InputIterator first, InputIterator last ) {
        for (; first != last; ++first) {
            insert(*first);
        }
    }

    void insert( std::initializer_list<value_type> init ) {
        insert(init.begin(), init.end());
    }
0442
    // Inserts the node owned by a node handle (splicing, no element copy).
    // On success the handle is deactivated — ownership moves to the table.
    // On failure the handle keeps the node and the returned iterator points
    // to the already-present element with the equal key.
    std::pair<iterator, bool> insert( node_type&& nh ) {
        if (!nh.empty()) {
            value_node_ptr insert_node = d1::node_handle_accessor::get_node_ptr(nh);
            // Deferred initialization: internal_insert supplies the final
            // split-order key only once the position is known.
            auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr {
                insert_node->init(order_key);
                return insert_node;
            };
            auto insert_result = internal_insert(insert_node->value(), init_node);
            if (insert_result.inserted) {
                __TBB_ASSERT(insert_result.remaining_node == nullptr,
                             "internal_insert_node should not return the remaining node if the insertion succeeded");
                d1::node_handle_accessor::deactivate(nh);
            }
            return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
        }
        // Empty handle: nothing to insert.
        return {end(), false};
    }

    // Hinted node-handle insertion: the hint is ignored.
    iterator insert( const_iterator, node_type&& nh ) {
        return insert(std::move(nh)).first;
    }
0466
    // Emplacement: the element must be constructed before the lookup because
    // its key is needed to locate the insertion position.  Order key 0 is a
    // placeholder; init_node assigns the real key during insertion.
    template <typename... Args>
    std::pair<iterator, bool> emplace( Args&&... args ) {
        value_node_ptr insert_node = create_node(0, std::forward<Args>(args)...);

        auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr {
            insert_node->init(order_key);
            return insert_node;
        };

        auto insert_result = internal_insert(insert_node->value(), init_node);

        if (!insert_result.inserted) {
            // Insertion failed (an element with an equal key is already
            // present).  Give the node a regular (odd) order key so that
            // destroy_node treats it as a value node rather than a dummy,
            // then dispose of it together with its constructed element.
            insert_node->init(split_order_key_regular(1));
            destroy_node(insert_node);
        }

        return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
    }

    // Hinted emplacement: the hint is ignored, a full lookup is performed.
    template <typename... Args>
    iterator emplace_hint( const_iterator, Args&&... args ) {
        return emplace(std::forward<Args>(args)...).first;
    }
0494
    // Erasure is not concurrency-safe (hence the unsafe_ prefix).
    // Returns an iterator to the first value node after the erased one.
    iterator unsafe_erase( const_iterator pos ) {
        return iterator(first_value_node(internal_erase(pos.get_node_ptr())));
    }

    iterator unsafe_erase( iterator pos ) {
        return iterator(first_value_node(internal_erase(pos.get_node_ptr())));
    }

    // Erases [first, last) one element at a time.
    iterator unsafe_erase( const_iterator first, const_iterator last ) {
        while(first != last) {
            first = unsafe_erase(first);
        }
        return iterator(first.get_node_ptr());
    }

    // Erases all elements with the given key; returns the number erased.
    size_type unsafe_erase( const key_type& key ) {
        return internal_erase_by_key(key);
    }

    // Heterogeneous erase; excluded when K converts to an iterator type so
    // that unsafe_erase(iterator) is not hijacked by this template.
    template <typename K>
    typename std::enable_if<is_transparent<K>::value
                            && !std::is_convertible<K, const_iterator>::value
                            && !std::is_convertible<K, iterator>::value,
                            size_type>::type unsafe_erase( const K& key )
    {
        return internal_erase_by_key(key);
    }
0522
    // Unlinks the node at pos from the list and returns a node handle that
    // owns it.  Not concurrency-safe.
    node_type unsafe_extract( const_iterator pos ) {
        internal_extract(pos.get_node_ptr());
        return d1::node_handle_accessor::construct<node_type>(pos.get_node_ptr());
    }

    node_type unsafe_extract( iterator pos ) {
        internal_extract(pos.get_node_ptr());
        return d1::node_handle_accessor::construct<node_type>(pos.get_node_ptr());
    }

    // Extracts one element with the given key; empty handle if absent.
    node_type unsafe_extract( const key_type& key ) {
        iterator item = find(key);
        return item == end() ? node_type() : unsafe_extract(item);
    }

    // Heterogeneous extract; excluded when K converts to an iterator type so
    // that the iterator overloads above keep priority.
    template <typename K>
    typename std::enable_if<is_transparent<K>::value
                            && !std::is_convertible<K, const_iterator>::value
                            && !std::is_convertible<K, iterator>::value,
                            node_type>::type unsafe_extract( const K& key )
    {
        iterator item = find(key);
        return item == end() ? node_type() : unsafe_extract(item);
    }
0547
0548
0549 iterator find( const key_type& key ) {
0550 value_node_ptr result = internal_find(key);
0551 return result == nullptr ? end() : iterator(result);
0552 }
0553
0554 const_iterator find( const key_type& key ) const {
0555 value_node_ptr result = const_cast<self_type*>(this)->internal_find(key);
0556 return result == nullptr ? end() : const_iterator(result);
0557 }
0558
0559 template <typename K>
0560 typename std::enable_if<is_transparent<K>::value, iterator>::type find( const K& key ) {
0561 value_node_ptr result = internal_find(key);
0562 return result == nullptr ? end() : iterator(result);
0563 }
0564
0565 template <typename K>
0566 typename std::enable_if<is_transparent<K>::value, const_iterator>::type find( const K& key ) const {
0567 value_node_ptr result = const_cast<self_type*>(this)->internal_find(key);
0568 return result == nullptr ? end() : const_iterator(result);
0569 }
0570
    // Range of elements with an equivalent key ([pos, pos+count) within the
    // split-ordered list); both ends equal end() when the key is absent.
    std::pair<iterator, iterator> equal_range( const key_type& key ) {
        auto result = internal_equal_range(key);
        return std::make_pair(iterator(result.first), iterator(result.second));
    }

    std::pair<const_iterator, const_iterator> equal_range( const key_type& key ) const {
        auto result = const_cast<self_type*>(this)->internal_equal_range(key);
        return std::make_pair(const_iterator(result.first), const_iterator(result.second));
    }

    // Heterogeneous overload; participates only for transparent key_equal.
    template <typename K>
    typename std::enable_if<is_transparent<K>::value, std::pair<iterator, iterator>>::type equal_range( const K& key ) {
        auto result = internal_equal_range(key);
        return std::make_pair(iterator(result.first), iterator(result.second));
    }
0586
0587 template <typename K>
0588 typename std::enable_if<is_transparent<K>::value, std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const {
0589 auto result = const_cast<self_type*>(this)->internal_equal_range(key);
0590 return std::make_pair(iterator(result.first), iterator(result.second));
0591 }
0592
    // Number of elements with an equivalent key (0 or 1 unless multimapping).
    size_type count( const key_type& key ) const {
        return internal_count(key);
    }

    template <typename K>
    typename std::enable_if<is_transparent<K>::value, size_type>::type count( const K& key ) const {
        return internal_count(key);
    }

    // True iff at least one element with an equivalent key exists.
    bool contains( const key_type& key ) const {
        return find(key) != end();
    }

    template <typename K>
    typename std::enable_if<is_transparent<K>::value, bool>::type contains( const K& key ) const {
        return find(key) != end();
    }
0610
0611
    // Bucket interface (not concurrency-safe, hence the unsafe_ prefix).
    // A bucket is a contiguous sub-range of the split-ordered list starting
    // at the bucket's dummy node.
    local_iterator unsafe_begin( size_type n ) {
        return local_iterator(first_value_node(get_bucket(n)));
    }

    // get_bucket may lazily initialize the bucket, so const is cast away.
    const_local_iterator unsafe_begin( size_type n ) const {
        auto bucket_begin = first_value_node(const_cast<self_type*>(this)->get_bucket(n));
        return const_local_iterator(bucket_begin);
    }

    const_local_iterator unsafe_cbegin( size_type n ) const {
        auto bucket_begin = first_value_node(const_cast<self_type*>(this)->get_bucket(n));
        return const_local_iterator(bucket_begin);
    }

    // The end of bucket n is the beginning of the bucket selected by
    // get_next_bucket_index, or the null iterator for the last bucket.
    local_iterator unsafe_end( size_type n ) {
        size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
        return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : local_iterator(nullptr);
    }

    const_local_iterator unsafe_end( size_type n ) const {
        size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
        return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr);
    }

    const_local_iterator unsafe_cend( size_type n ) const {
        size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
        return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr);
    }

    size_type unsafe_bucket_count() const { return my_bucket_count.load(std::memory_order_relaxed); }

    size_type unsafe_max_bucket_count() const {
        return max_size();
    }

    // Counted by walking the bucket — O(bucket size).
    size_type unsafe_bucket_size( size_type n ) const {
        return size_type(std::distance(unsafe_begin(n), unsafe_end(n)));
    }

    // Index of the bucket the key hashes to (bucket count is a power of two).
    size_type unsafe_bucket( const key_type& key ) const {
        return my_hash_compare(key) % my_bucket_count.load(std::memory_order_relaxed);
    }
0654
0655
    // Average number of elements per bucket.
    float load_factor() const {
        return float(size() / float(my_bucket_count.load(std::memory_order_acquire)));
    }

    float max_load_factor() const { return my_max_load_factor; }

    // Sets the load-factor ceiling; rejects NaN (mlf != mlf) and negatives.
    void max_load_factor( float mlf ) {
        if (mlf != mlf || mlf < 0) {
            tbb::detail::throw_exception(exception_id::invalid_load_factor);
        }
        my_max_load_factor = mlf;
    }

    // Grows (never shrinks) the bucket count to at least bucket_count,
    // rounded up to a power of two.  A single CAS: if another thread changed
    // the count concurrently, this call may have no effect.
    void rehash( size_type bucket_count ) {
        size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire);
        if (current_bucket_count < bucket_count) {
            my_bucket_count.compare_exchange_strong(current_bucket_count, round_up_to_power_of_two(bucket_count));
        }
    }
0676
    // Grows the bucket count until elements_count items fit without
    // exceeding max_load_factor().  Doubling preserves the power-of-two
    // invariant.  NOTE(review): assumes max_load_factor() > 0 — with a zero
    // load factor the first loop would not terminate; confirm upstream.
    void reserve( size_type elements_count ) {
        size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire);
        size_type necessary_bucket_count = current_bucket_count;

        while (necessary_bucket_count * max_load_factor() < elements_count) {
            necessary_bucket_count <<= 1;
        }

        // CAS loop: only ever increase the bucket count; stop if another
        // thread has already grown it at least as far.
        while (!my_bucket_count.compare_exchange_strong(current_bucket_count, necessary_bucket_count)) {
            if (current_bucket_count >= necessary_bucket_count)
                break;
        }
    }

    // Observers.
    hasher hash_function() const { return my_hash_compare.hash_function(); }
    key_equal key_eq() const { return my_hash_compare.key_eq(); }
0697
    // TBB range over the whole container for parallel algorithms (const
    // flavor).  Splitting picks approximate midpoints using split-order keys,
    // so the two halves are generally unequal in size.
    class const_range_type {
    private:
        const concurrent_unordered_base& my_instance;
        node_ptr my_begin_node;
        node_ptr my_end_node;
        // Cached split position; mutable because set_midpoint() is const.
        mutable node_ptr my_midpoint_node;
    public:
        using size_type = typename concurrent_unordered_base::size_type;
        using value_type = typename concurrent_unordered_base::value_type;
        using reference = typename concurrent_unordered_base::reference;
        using difference_type = typename concurrent_unordered_base::difference_type;
        using iterator = typename concurrent_unordered_base::const_iterator;

        bool empty() const { return my_begin_node == my_end_node; }

        // Divisible while a midpoint strictly inside the range exists.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }

        size_type grainsize() const { return 1; }

        // Splitting constructor: this range takes [midpoint, end) and the
        // original keeps [begin, midpoint); both recompute their midpoints.
        const_range_type( const_range_type& range, split )
            : my_instance(range.my_instance),
              my_begin_node(range.my_midpoint_node),
              my_end_node(range.my_end_node)
        {
            range.my_end_node = my_begin_node;
            __TBB_ASSERT(!empty(), "Splitting despite the range is not divisible");
            __TBB_ASSERT(!range.empty(), "Splitting despite the range is not divisible");
            set_midpoint();
            range.set_midpoint();
        }

        iterator begin() const { return iterator(my_instance.first_value_node(my_begin_node)); }
        iterator end() const { return iterator(my_instance.first_value_node(my_end_node)); }

        // Whole-container range: [head, nullptr).
        const_range_type( const concurrent_unordered_base& table )
            : my_instance(table), my_begin_node(my_instance.first_value_node(const_cast<node_ptr>(&table.my_head))), my_end_node(nullptr)
        {
            set_midpoint();
        }
    private:
        // Chooses a node roughly halfway (in split-order key space) between
        // begin and end and caches it in my_midpoint_node.
        void set_midpoint() const {
            if (empty()) {
                my_midpoint_node = my_end_node;
            } else {
                // A null end node is treated as the maximal order key.
                sokey_type invalid_key = ~sokey_type(0);
                sokey_type begin_key = my_begin_node != nullptr ? my_begin_node->order_key() : invalid_key;
                sokey_type end_key = my_end_node != nullptr ? my_end_node->order_key() : invalid_key;

                // Map the middle order key back to a bucket index...
                size_type mid_bucket = reverse_bits(begin_key + (end_key - begin_key) / 2) %
                    my_instance.my_bucket_count.load(std::memory_order_relaxed);
                // ...and climb to an ancestor bucket that is initialized.
                while( my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed) == nullptr) {
                    mid_bucket = my_instance.get_parent(mid_bucket);
                }
                if (reverse_bits(mid_bucket) > begin_key) {
                    // The bucket's first node lies strictly inside the range:
                    // use its first value node as the midpoint.
                    my_midpoint_node = my_instance.first_value_node(
                        my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed));
                } else {
                    // No usable split point: the range is not divisible.
                    my_midpoint_node = my_end_node;
                }
            }
        }
    };
0764
    // Mutable range: same traversal/splitting as const_range_type, but hands
    // out non-const iterators (rebuilt from the underlying node pointers).
    class range_type : public const_range_type {
    public:
        using iterator = typename concurrent_unordered_base::iterator;
        using const_range_type::const_range_type;

        iterator begin() const { return iterator(const_range_type::begin().get_node_ptr()); }
        iterator end() const { return iterator(const_range_type::end().get_node_ptr()); }
    };

    range_type range() {
        return range_type(*this);
    }

    const_range_type range() const {
        return const_range_type(*this);
    }
protected:
    // True for multimap/multiset traits: equal keys may coexist.
    static constexpr bool allow_multimapping = traits_type::allow_multimapping;

private:
    static constexpr size_type initial_bucket_count = 8;
    static constexpr float initial_max_load_factor = 4;
    // Number of segment pointers stored inside the table object itself; with
    // one pointer per power-of-two segment this covers every possible bucket
    // count, so no dynamically allocated "long" table is ever needed.
    static constexpr size_type pointers_per_embedded_table = sizeof(size_type) * 8 - 1;
0789
0790 class unordered_segment_table
0791 : public d1::segment_table<std::atomic<node_ptr>, allocator_type, unordered_segment_table, pointers_per_embedded_table>
0792 {
0793 using self_type = unordered_segment_table;
0794 using atomic_node_ptr = std::atomic<node_ptr>;
0795 using base_type = d1::segment_table<std::atomic<node_ptr>, allocator_type, unordered_segment_table, pointers_per_embedded_table>;
0796 using segment_type = typename base_type::segment_type;
0797 using base_allocator_type = typename base_type::allocator_type;
0798
0799 using segment_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_node_ptr>;
0800 using segment_allocator_traits = tbb::detail::allocator_traits<segment_allocator_type>;
0801 public:
0802
0803 static constexpr bool allow_table_extending = false;
0804 static constexpr bool is_noexcept_assignment = std::is_nothrow_move_assignable<hasher>::value &&
0805 std::is_nothrow_move_assignable<key_equal>::value &&
0806 segment_allocator_traits::is_always_equal::value;
0807 static constexpr bool is_noexcept_swap = tbb::detail::is_nothrow_swappable<hasher>::value &&
0808 tbb::detail::is_nothrow_swappable<key_equal>::value &&
0809 segment_allocator_traits::is_always_equal::value;
0810
0811
0812 unordered_segment_table( const base_allocator_type& alloc = base_allocator_type() )
0813 : base_type(alloc) {}
0814
0815 unordered_segment_table( const unordered_segment_table& ) = default;
0816
0817 unordered_segment_table( const unordered_segment_table& other, const base_allocator_type& alloc )
0818 : base_type(other, alloc) {}
0819
0820 unordered_segment_table( unordered_segment_table&& ) = default;
0821
0822 unordered_segment_table( unordered_segment_table&& other, const base_allocator_type& alloc )
0823 : base_type(std::move(other), alloc) {}
0824
0825 unordered_segment_table& operator=( const unordered_segment_table& ) = default;
0826
0827 unordered_segment_table& operator=( unordered_segment_table&& ) = default;
0828
0829 segment_type create_segment( typename base_type::segment_table_type, typename base_type::segment_index_type segment_index, size_type ) {
0830 segment_allocator_type alloc(this->get_allocator());
0831 size_type seg_size = this->segment_size(segment_index);
0832 segment_type new_segment = segment_allocator_traits::allocate(alloc, seg_size);
0833 for (size_type i = 0; i != seg_size; ++i) {
0834 segment_allocator_traits::construct(alloc, new_segment + i, nullptr);
0835 }
0836 return new_segment;
0837 }
0838
0839 segment_type nullify_segment( typename base_type::segment_table_type table, size_type segment_index ) {
0840 segment_type target_segment = table[segment_index].load(std::memory_order_relaxed);
0841 table[segment_index].store(nullptr, std::memory_order_relaxed);
0842 return target_segment;
0843 }
0844
0845
0846
0847 void deallocate_segment( segment_type address, size_type index ) {
0848 destroy_segment(address, index);
0849 }
0850
0851 void destroy_segment( segment_type address, size_type index ) {
0852 segment_allocator_type alloc(this->get_allocator());
0853 for (size_type i = 0; i != this->segment_size(index); ++i) {
0854 segment_allocator_traits::destroy(alloc, address + i);
0855 }
0856 segment_allocator_traits::deallocate(alloc, address, this->segment_size(index));
0857 }
0858
0859
0860 void copy_segment( size_type index, segment_type, segment_type to ) {
0861 if (index == 0) {
0862
0863
0864
0865 to[1].store(nullptr, std::memory_order_relaxed);
0866 } else {
0867 for (size_type i = 0; i != this->segment_size(index); ++i) {
0868 to[i].store(nullptr, std::memory_order_relaxed);
0869 }
0870 }
0871 }
0872
// Transfers bucket pointers from one segment to another and nullifies the
// source slots (except in the first segment - see below).
void move_segment( size_type index, segment_type from, segment_type to ) {
    if (index == 0) {
        // In the first (embedded) segment only slot 1 is transferred: slot 0 is
        // repointed at the owning container's head node by the move routines.
        to[1].store(from[1].load(std::memory_order_relaxed), std::memory_order_relaxed);
    } else {
        for (size_type i = 0; i != this->segment_size(index); ++i) {
            to[i].store(from[i].load(std::memory_order_relaxed), std::memory_order_relaxed);
            from[i].store(nullptr, std::memory_order_relaxed);
        }
    }
}
0886
0887
// This overload is not expected to be reached for the unordered containers;
// reaching it indicates broken size accounting in the caller.
typename base_type::segment_table_type allocate_long_table( const typename base_type::atomic_segment*, size_type ) {
    __TBB_ASSERT(false, "This method should never been called");
    // Unreachable in correct use; return value only satisfies the signature.
    return nullptr;
}
0893
0894
0895
// Nothing to do: the container destroys its nodes itself (see internal_clear);
// segments hold only non-owning bucket pointers.
void destroy_elements() {}
0897 };
0898
0899 void internal_clear() {
0900
0901 node_ptr next = my_head.next();
0902 node_ptr curr = next;
0903
0904 my_head.set_next(nullptr);
0905
0906 while (curr != nullptr) {
0907 next = curr->next();
0908 destroy_node(curr);
0909 curr = next;
0910 }
0911
0912 my_size.store(0, std::memory_order_relaxed);
0913 my_segments.clear();
0914 }
0915
// Destroys and deallocates a list node, dispatching on whether it is a bucket
// dummy node or a value node.
void destroy_node( node_ptr node ) {
    if (node->is_dummy()) {
        node_allocator_type dummy_node_allocator(my_segments.get_allocator());
        // Destroy the node header
        node_allocator_traits::destroy(dummy_node_allocator, node);
        // Deallocate its storage
        node_allocator_traits::deallocate(dummy_node_allocator, node, 1);
    } else {
        // NOTE(review): inherited compiler workaround - `volatile` is applied for a
        // range of GCC versions, presumably to suppress a problematic optimization
        // or false-positive diagnostic; confirm before changing the version bounds.
#if (__TBB_GCC_VERSION >= 110100 && __TBB_GCC_VERSION < 150000 ) && !__clang__ && !__INTEL_COMPILER
        volatile
#endif
        value_node_ptr val_node = static_cast<value_node_ptr>(node);
        value_node_allocator_type value_node_allocator(my_segments.get_allocator());
        // Destroy the stored value
        value_node_allocator_traits::destroy(value_node_allocator, val_node->storage());
        // Destroy the node header
        value_node_allocator_traits::destroy(value_node_allocator, val_node);
        // Deallocate its storage
        value_node_allocator_traits::deallocate(value_node_allocator, val_node, 1);
    }
}
0938
// Result of internal_insert.
struct internal_insert_return_type {
    // A node that was created but not inserted (an equal key won the race);
    // the caller is responsible for destroying it. nullptr otherwise.
    value_node_ptr remaining_node;
    // The inserted node, or the existing node holding an equal key.
    value_node_ptr node_with_equal_key;
    // Whether the insertion took place.
    bool inserted;
};
0950
0951
0952 template <typename ValueType>
0953 std::pair<iterator, bool> internal_insert_value( ValueType&& value ) {
0954
0955 auto create_value_node = [&value, this]( sokey_type order_key )->value_node_ptr {
0956 return create_node(order_key, std::forward<ValueType>(value));
0957 };
0958
0959 auto insert_result = internal_insert(value, create_value_node);
0960
0961 if (insert_result.remaining_node != nullptr) {
0962
0963 __TBB_ASSERT(!insert_result.inserted,
0964 "remaining_node should be nullptr if the node was successfully inserted");
0965 destroy_node(insert_result.remaining_node);
0966 }
0967
0968 return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
0969 }
0970
0971
0972
0973
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
// Core lock-free insertion into the split-ordered list. The node is created via
// create_insert_node only after establishing that no equal key is present (for
// containers that forbid duplicates). Returns the inserted/conflicting node,
// a possibly-unused node the caller must destroy, and a success flag.
template <typename ValueType, typename CreateInsertNode>
internal_insert_return_type internal_insert( ValueType&& value, CreateInsertNode create_insert_node ) {
    static_assert(std::is_same<typename std::decay<ValueType>::type, value_type>::value,
                  "Incorrect type in internal_insert");
    const key_type& key = traits_type::get_key(value);
    sokey_type hash_key = sokey_type(my_hash_compare(key));

    sokey_type order_key = split_order_key_regular(hash_key);
    node_ptr prev = prepare_bucket(hash_key);
    __TBB_ASSERT(prev != nullptr, "Invalid head node");

    // Find the position after which the new node should be linked
    auto search_result = search_after(prev, order_key, key);

    // An equal key already exists and duplicates are not allowed
    if (search_result.second) {
        return internal_insert_return_type{ nullptr, search_result.first, false };
    }

    value_node_ptr new_node = create_insert_node(order_key);
    node_ptr curr = search_result.first;

    while (!try_insert(prev, new_node, curr)) {
        // CAS failed: prev->next() changed concurrently - rescan from prev
        search_result = search_after(prev, order_key, key);
        if (search_result.second) {
            // An equal key was inserted concurrently; hand the unused node back
            return internal_insert_return_type{ new_node, search_result.first, false };
        }
        curr = search_result.first;
    }

    // Successfully linked: bump the size and grow the table if overloaded
    auto sz = my_size.fetch_add(1);
    adjust_table_size(sz + 1, my_bucket_count.load(std::memory_order_acquire));
    return internal_insert_return_type{ nullptr, static_cast<value_node_ptr>(new_node), true };
}
1017
1018
1019
1020
// Scans the list starting at prev for the split-order position of order_key.
// On return prev references the last node ordered strictly before the position.
// Returns {node, true} when a node with an equal key exists and duplicates are
// forbidden; otherwise {first node at/after the position (may be nullptr), false}.
std::pair<value_node_ptr, bool> search_after( node_ptr& prev, sokey_type order_key, const key_type& key ) {
    node_ptr curr = prev->next();

    // Advance while nodes order strictly before order_key, or share the order
    // key but hold a different (hash-colliding) key
    while (curr != nullptr && (curr->order_key() < order_key ||
           (curr->order_key() == order_key && !my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key))))
    {
        prev = curr;
        curr = curr->next();
    }

    if (curr != nullptr && curr->order_key() == order_key && !allow_multimapping) {
        // Equal key found in a container that forbids duplicates
        return { static_cast<value_node_ptr>(curr), true };
    }
    return { static_cast<value_node_ptr>(curr), false };
}
1039
// Doubles the bucket count when the load factor exceeds the configured maximum.
void adjust_table_size( size_type total_elements, size_type current_size ) {
    // Grow only when the observed load factor crosses the threshold
    if ( (float(total_elements) / float(current_size)) > my_max_load_factor ) {
        // A failing CAS is benign: it means another thread already grew the table
        // (current_size is stale); subsequent insertions re-check the load factor.
        my_bucket_count.compare_exchange_strong(current_size, 2u * current_size);
    }
}
1047
// Inserts a dummy (bucket sentinel) node with the given order key into the list,
// searching from parent_dummy_node. If another thread concurrently inserts a
// dummy with the same order key, ours is destroyed and the winner is returned.
node_ptr insert_dummy_node( node_ptr parent_dummy_node, sokey_type order_key ) {
    node_ptr prev_node = parent_dummy_node;

    node_ptr dummy_node = create_dummy_node(order_key);
    node_ptr next_node;

    do {
        next_node = prev_node->next();

        // Advance to the position ordered right before order_key
        while (next_node != nullptr && next_node->order_key() < order_key) {
            prev_node = next_node;
            next_node = next_node->next();
        }

        if (next_node != nullptr && next_node->order_key() == order_key) {
            // A dummy node with this order key was inserted concurrently;
            // reclaim ours and reuse the existing one
            destroy_node(dummy_node);
            return next_node;
        }
    } while (!try_insert(prev_node, dummy_node, next_node)); // retry on concurrent change

    return dummy_node;
}
1072
1073
1074
// Atomically links new_node between prev_node and current_next_node.
// Returns false when prev_node->next() changed concurrently; caller must retry.
static bool try_insert( node_ptr prev_node, node_ptr new_node, node_ptr current_next_node ) {
    new_node->set_next(current_next_node);
    return prev_node->try_set_next(current_next_node, new_node);
}
1079
1080
1081 node_ptr prepare_bucket( sokey_type hash_key ) {
1082 size_type bucket = hash_key % my_bucket_count.load(std::memory_order_acquire);
1083 return get_bucket(bucket);
1084 }
1085
1086
// Returns the node heading the bucket, lazily initializing the bucket on first use.
node_ptr get_bucket( size_type bucket_index ) {
    if (my_segments[bucket_index].load(std::memory_order_acquire) == nullptr) {
        init_bucket(bucket_index);
    }
    // Re-load after (possible) initialization; the slot is published with release
    return my_segments[bucket_index].load(std::memory_order_acquire);
}
1093
// Lazily initializes a bucket: ensures its parent bucket exists first, then
// inserts this bucket's dummy node into the list and publishes it in the table.
void init_bucket( size_type bucket ) {
    if (bucket == 0) {
        // Bucket 0 is always headed by the embedded list head node
        node_ptr disabled = nullptr;
        my_segments[0].compare_exchange_strong(disabled, &my_head);
        return;
    }

    size_type parent_bucket = get_parent(bucket);

    // Initialize the parent bucket (recursively, up the split-order tree) first
    while (my_segments[parent_bucket].load(std::memory_order_acquire) == nullptr) {
        init_bucket(parent_bucket);
    }

    __TBB_ASSERT(my_segments[parent_bucket].load(std::memory_order_acquire) != nullptr, "Parent bucket should be initialized");
    node_ptr parent = my_segments[parent_bucket].load(std::memory_order_acquire);

    // Insert (or find) this bucket's dummy node, searching from the parent's head
    node_ptr dummy_node = insert_dummy_node(parent, split_order_key_dummy(bucket));

    // Publish the bucket head so other threads observe a fully linked dummy node
    my_segments[bucket].store(dummy_node, std::memory_order_release);
}
1119
1120 node_ptr create_dummy_node( sokey_type order_key ) {
1121 node_allocator_type dummy_node_allocator(my_segments.get_allocator());
1122 node_ptr dummy_node = node_allocator_traits::allocate(dummy_node_allocator, 1);
1123 node_allocator_traits::construct(dummy_node_allocator, dummy_node, order_key);
1124 return dummy_node;
1125 }
1126
// Allocates and constructs a value node with the given order key, building the
// stored value in place from args. Exception-safe: if value construction throws,
// the node itself is destroyed and its memory released.
template <typename... Args>
value_node_ptr create_node( sokey_type order_key, Args&&... args ) {
    value_node_allocator_type value_node_allocator(my_segments.get_allocator());
    // Allocate storage for the node
    value_node_ptr new_node = value_node_allocator_traits::allocate(value_node_allocator, 1);
    // Construct the node header; the stored value is not constructed yet
    value_node_allocator_traits::construct(value_node_allocator, new_node, order_key);
    // Roll back header construction and allocation if building the value throws
    auto value_guard = make_raii_guard([&] {
        value_node_allocator_traits::destroy(value_node_allocator, new_node);
        value_node_allocator_traits::deallocate(value_node_allocator, new_node, 1);
    });
    // Construct the value inside the node's storage
    value_node_allocator_traits::construct(value_node_allocator, new_node->storage(), std::forward<Args>(args)...);
    value_guard.dismiss();
    return new_node;
}
1147
1148 value_node_ptr first_value_node( node_ptr first_node ) const {
1149 while (first_node != nullptr && first_node->is_dummy()) {
1150 first_node = first_node->next();
1151 }
1152 return static_cast<value_node_ptr>(first_node);
1153 }
1154
1155
1156 node_ptr internal_erase( value_node_ptr node_to_erase ) {
1157 __TBB_ASSERT(node_to_erase != nullptr, "Invalid iterator for erase");
1158 node_ptr next_node = node_to_erase->next();
1159 internal_extract(node_to_erase);
1160 destroy_node(node_to_erase);
1161 return next_node;
1162 }
1163
1164 template <typename K>
1165 size_type internal_erase_by_key( const K& key ) {
1166
1167
1168 auto eq_range = equal_range(key);
1169 size_type erased_count = 0;
1170
1171 for (auto it = eq_range.first; it != eq_range.second;) {
1172 it = unsafe_erase(it);
1173 ++erased_count;
1174 }
1175 return erased_count;
1176 }
1177
1178
// Unlinks node_to_extract from the list without destroying it and decrements
// the element count. Not safe with concurrent modifications (see unlink_node).
void internal_extract( value_node_ptr node_to_extract ) {
    const key_type& key = traits_type::get_key(node_to_extract->value());
    sokey_type hash_key = sokey_type(my_hash_compare(key));

    node_ptr prev_node = prepare_bucket(hash_key);

    // Linear scan from the bucket head to find the node's predecessor
    for (node_ptr node = prev_node->next(); node != nullptr; prev_node = node, node = node->next()) {
        if (node == node_to_extract) {
            unlink_node(prev_node, node, node_to_extract->next());
            // Non-atomic decrement: extraction is not a concurrency-safe operation
            my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
            return;
        }
        __TBB_ASSERT(node->order_key() <= node_to_extract->order_key(),
                     "node, which is going to be extracted should be presented in the list");
    }
}
1195
1196 protected:
// Moves value nodes from source into *this by relinking (no copies): all nodes
// for multi-containers, otherwise only nodes whose keys are absent here.
// Concurrent modification of the source container is not allowed.
template <typename SourceType>
void internal_merge( SourceType&& source ) {
    static_assert(std::is_same<node_type, typename std::decay<SourceType>::type::node_type>::value,
                  "Incompatible containers cannot be merged");

    for (node_ptr source_prev = &source.my_head; source_prev->next() != nullptr;) {
        if (!source_prev->next()->is_dummy()) {
            value_node_ptr curr = static_cast<value_node_ptr>(source_prev->next());
            // Merge the node when duplicates are allowed or the key is absent here
            if (allow_multimapping || !contains(traits_type::get_key(curr->value()))) {
                node_ptr next_node = curr->next();
                source.unlink_node(source_prev, curr, next_node);

                // Remember the order key so the node can be relinked on failure
                sokey_type old_order_key = curr->order_key();

                // Wrap the extracted node into a node handle for insertion
                node_type curr_node = d1::node_handle_accessor::construct<node_type>(curr);

                if (!insert(std::move(curr_node)).second) {
                    // Insertion failed: an equal key appeared in *this concurrently.
                    // Relink the node back into source at its previous position.
                    __TBB_ASSERT(!allow_multimapping, "Insertion should succeed for multicontainer");
                    __TBB_ASSERT(source_prev->next() == next_node,
                                 "Concurrent operations with the source container in merge are prohibited");

                    // Restore the order key (node handle construction may reset it)
                    curr->init(old_order_key);
                    __TBB_ASSERT(old_order_key >= source_prev->order_key() &&
                                 (next_node == nullptr || old_order_key <= next_node->order_key()),
                                 "Wrong nodes order in the source container");

                    curr->set_next(next_node);
                    source_prev->set_next(curr);
                    source_prev = curr;
                    // Release ownership from the handle without destroying the node
                    d1::node_handle_accessor::deactivate(curr_node);
                } else {
                    source.my_size.fetch_sub(1, std::memory_order_relaxed);
                }
            } else {
                source_prev = curr;
            }
        } else {
            source_prev = source_prev->next();
        }
    }
}
1246
1247 private:
1248
// Unlinks node_to_unlink, which must currently sit between prev_node and
// next_node. Only valid when no concurrent modifications are possible.
void unlink_node( node_ptr prev_node, node_ptr node_to_unlink, node_ptr next_node ) {
    __TBB_ASSERT(prev_node->next() == node_to_unlink &&
                 node_to_unlink->next() == next_node,
                 "erasing and extracting nodes from the containers are unsafe in concurrent mode");
    prev_node->set_next(next_node);
    node_to_unlink->set_next(nullptr);
}
1256
1257 template <typename K>
1258 value_node_ptr internal_find( const K& key ) {
1259 sokey_type hash_key = sokey_type(my_hash_compare(key));
1260 sokey_type order_key = split_order_key_regular(hash_key);
1261
1262 node_ptr curr = prepare_bucket(hash_key);
1263
1264 while (curr != nullptr) {
1265 if (curr->order_key() > order_key) {
1266
1267
1268 return nullptr;
1269 } else if (curr->order_key() == order_key &&
1270 my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key)) {
1271
1272
1273
1274 return static_cast<value_node_ptr>(curr);
1275 }
1276 curr = curr->next();
1277 }
1278
1279 return nullptr;
1280 }
1281
// Returns the [first, last) range of value nodes with keys equal to `key`.
// For containers that forbid duplicates the range holds at most one element.
template <typename K>
std::pair<value_node_ptr, value_node_ptr> internal_equal_range( const K& key ) {
    sokey_type hash_key = sokey_type(my_hash_compare(key));
    sokey_type order_key = split_order_key_regular(hash_key);

    node_ptr curr = prepare_bucket(hash_key);

    while (curr != nullptr) {
        if (curr->order_key() > order_key) {
            // Walked past the key's split-order position - no match
            return std::make_pair(nullptr, nullptr);
        } else if (curr->order_key() == order_key &&
                   my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key)) {
            value_node_ptr first = static_cast<value_node_ptr>(curr);
            node_ptr last = first;
            // Equal keys are adjacent in split order; advance past all of them
            do {
                last = last->next();
            } while (allow_multimapping && last != nullptr && !last->is_dummy() &&
                     my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(last)->value()), key));
            // Skip trailing dummy nodes so the range end is a value node or nullptr
            return std::make_pair(first, first_value_node(last));
        }
        curr = curr->next();
    }
    return {nullptr, nullptr};
}
1308
1309 template <typename K>
1310 size_type internal_count( const K& key ) const {
1311 if (allow_multimapping) {
1312
1313 auto eq_range = equal_range(key);
1314 return std::distance(eq_range.first, eq_range.second);
1315 } else {
1316 return contains(key) ? 1 : 0;
1317 }
1318 }
1319
1320 void internal_copy( const concurrent_unordered_base& other ) {
1321 node_ptr last_node = &my_head;
1322 my_segments[0].store(&my_head, std::memory_order_relaxed);
1323
1324 for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) {
1325 node_ptr new_node;
1326 if (!node->is_dummy()) {
1327
1328 new_node = create_node(node->order_key(), static_cast<value_node_ptr>(node)->value());
1329 } else {
1330
1331 new_node = create_dummy_node(node->order_key());
1332 my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed);
1333 }
1334
1335 last_node->set_next(new_node);
1336 last_node = new_node;
1337 }
1338 }
1339
1340 void internal_move( concurrent_unordered_base&& other ) {
1341 node_ptr last_node = &my_head;
1342 my_segments[0].store(&my_head, std::memory_order_relaxed);
1343
1344 for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) {
1345 node_ptr new_node;
1346 if (!node->is_dummy()) {
1347
1348 new_node = create_node(node->order_key(), std::move(static_cast<value_node_ptr>(node)->value()));
1349 } else {
1350
1351
1352 new_node = create_dummy_node(node->order_key());
1353 my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed);
1354 }
1355
1356 last_node->set_next(new_node);
1357 last_node = new_node;
1358 }
1359 }
1360
// Steals the whole list from other and resets other to an empty, default state.
void move_content( concurrent_unordered_base&& other ) {
    // Take over the list and bind bucket 0 to this container's embedded head
    my_head.set_next(other.my_head.next());
    other.my_head.set_next(nullptr);
    my_segments[0].store(&my_head, std::memory_order_relaxed);

    // Return the source container to its initial state
    other.my_bucket_count.store(initial_bucket_count, std::memory_order_relaxed);
    other.my_max_load_factor = initial_max_load_factor;
    other.my_size.store(0, std::memory_order_relaxed);
}
1371
// Move construction when allocators are guaranteed equal
// (is_always_equal): the list can be stolen wholesale.
void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type&,
                                             std::true_type ) {
    move_content(std::move(other));
}
1377
// Move construction when allocators may differ: steal the list if they compare
// equal, otherwise move elements one by one (clearing on exception).
void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type& alloc,
                                             std::false_type ) {
    if (alloc == other.my_segments.get_allocator()) {
        move_content(std::move(other));
    } else {
        try_call( [&] {
            internal_move(std::move(other));
        } ).on_exception( [&] {
            clear();
        });
    }
}
1391
1392
1393
// Move assignment when the allocator propagates on move assignment or is
// always equal: the list can be stolen wholesale.
void internal_move_assign( concurrent_unordered_base&& other, std::true_type ) {
    move_content(std::move(other));
}
1397
1398
1399
// Move assignment when the allocator does not propagate: steal the list if the
// allocators compare equal, otherwise move elements one by one.
void internal_move_assign( concurrent_unordered_base&& other, std::false_type ) {
    if (my_segments.get_allocator() == other.my_segments.get_allocator()) {
        move_content(std::move(other));
    } else {
        // Unequal allocators: each element must be re-created with ours
        internal_move(std::move(other));
    }
}
1408
// Swap when the allocator propagates on swap or is always equal.
void internal_swap( concurrent_unordered_base& other, std::true_type ) {
    internal_swap_fields(other);
}
1412
// Swap when the allocator does not propagate: allocators must already be equal.
void internal_swap( concurrent_unordered_base& other, std::false_type ) {
    __TBB_ASSERT(my_segments.get_allocator() == other.my_segments.get_allocator(),
                 "Swapping with unequal allocators is not allowed");
    internal_swap_fields(other);
}
1418
// Swaps every field of the two containers. Relaxed operations indicate this is
// intended for use without concurrent access.
void internal_swap_fields( concurrent_unordered_base& other ) {
    // Swap list contents by exchanging the heads' successors (heads are embedded)
    node_ptr first_node = my_head.next();
    my_head.set_next(other.my_head.next());
    other.my_head.set_next(first_node);

    size_type current_size = my_size.load(std::memory_order_relaxed);
    my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
    other.my_size.store(current_size, std::memory_order_relaxed);

    size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
    my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
    other.my_bucket_count.store(bucket_count, std::memory_order_relaxed);

    using std::swap;
    swap(my_max_load_factor, other.my_max_load_factor);
    swap(my_hash_compare, other.my_hash_compare);
    my_segments.swap(other.my_segments);

    // After swapping the segment tables, bucket 0 in each table still points at
    // the *other* container's embedded head; repoint each at its own head node.
    my_segments[0].store(&my_head, std::memory_order_relaxed);
    other.my_segments[0].store(&other.my_head, std::memory_order_relaxed);
}
1443
1444
// Regular (value) nodes use the bit-reversed hash with the low bit set,
// so they order strictly after their bucket's dummy node.
static constexpr sokey_type split_order_key_regular( sokey_type hash ) {
    return reverse_bits(hash) | 0x1;
}
1448
1449
// Dummy (bucket sentinel) nodes use the bit-reversed hash with the low bit
// cleared, so they order before every regular node of the same bucket.
static constexpr sokey_type split_order_key_dummy( sokey_type hash ) {
    return reverse_bits(hash) & ~sokey_type(0x1);
}
1453
// Returns the parent bucket in the recursive split-ordering scheme: the bucket
// index with its most significant set bit cleared.
size_type get_parent( size_type bucket ) const {
    __TBB_ASSERT(bucket != 0, "Unable to get_parent of the bucket 0");
    size_type msb = tbb::detail::log2(bucket);
    return bucket & ~(size_type(1) << msb);
}
1460
// Returns the bucket index that follows `bucket` when buckets are enumerated
// in bit-reversed (split) order over the current table width.
size_type get_next_bucket_index( size_type bucket ) const {
    size_type bits = tbb::detail::log2(my_bucket_count.load(std::memory_order_relaxed));
    size_type reversed_next = reverse_n_bits(bucket, bits) + 1;
    return reverse_n_bits(reversed_next, bits);
}
1466
std::atomic<size_type> my_size;         // number of value (non-dummy) nodes
std::atomic<size_type> my_bucket_count; // current bucket count; doubled by adjust_table_size
float my_max_load_factor;               // load-factor threshold that triggers growth
hash_compare_type my_hash_compare;      // functor used for both hashing (1 arg) and key equality (2 args)

list_node_type my_head;                 // embedded head (dummy) node of the split-ordered list
unordered_segment_table my_segments;    // bucket table; slot 0 points at my_head

template <typename Container, typename Value>
friend class solist_iterator;

template <typename OtherTraits>
friend class concurrent_unordered_base;
1480 };
1481
// Two containers are equal when they hold the same multiset of elements.
template <typename Traits>
bool operator==( const concurrent_unordered_base<Traits>& lhs,
                 const concurrent_unordered_base<Traits>& rhs ) {
    if (&lhs == &rhs) { return true; }
    if (lhs.size() != rhs.size()) { return false; }

#if _MSC_VER
    // NOTE(review): inherited MSVC-specific workaround - the 4-argument overload
    // is used here; the exact reason is not visible in this file, confirm before changing.
    return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
#else
    // Sizes already match, so the 3-argument overload suffices
    return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
#endif
}
1498
#if !__TBB_CPP20_COMPARISONS_PRESENT
// Provided explicitly when C++20 cannot synthesize it from operator==.
template <typename Traits>
bool operator!=( const concurrent_unordered_base<Traits>& lhs,
                 const concurrent_unordered_base<Traits>& rhs ) {
    return !(lhs == rhs);
}
#endif
1506
1507 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1508 #pragma warning(pop)
1509 #endif
1510
1511 }
1512 }
1513 }
1514
1515 #endif