EIC code displayed by LXR

0001 /*
0002     Copyright (c) 2005-2023 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_detail__concurrent_unordered_base_H
0018 #define __TBB_detail__concurrent_unordered_base_H
0019 
0020 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H)
0021 #error Do not #include this internal file directly; use public TBB headers instead.
0022 #endif
0023 
0024 #include "_range_common.h"
0025 #include "_containers_helpers.h"
0026 #include "_segment_table.h"
0027 #include "_hash_compare.h"
0028 #include "_allocator_traits.h"
0029 #include "_node_handle.h"
0030 #include "_assert.h"
0031 #include "_utils.h"
0032 #include "_exception.h"
0033 #include <iterator>
0034 #include <utility>
0035 #include <functional>
0036 #include <initializer_list>
0037 #include <atomic>
0038 #include <type_traits>
0039 #include <memory>
0040 #include <algorithm>
0041 
0042 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
0043 #pragma warning(push)
0044 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
0045 #endif
0046 
0047 namespace tbb {
0048 namespace detail {
0049 namespace d1 {
0050 
0051 template <typename Traits>
0052 class concurrent_unordered_base;
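     // NOTE: a sketch of the Traits contract, inferred from its uses in this file;
     // the real traits are defined in the public concurrent_unordered_map/set
     // headers, and every name below is illustrative only:
     //
     //     struct example_traits {                         // hypothetical
     //         using key_type          = ...;
     //         using value_type        = ...;              // pair<const K, V> for maps
     //         using allocator_type    = ...;
     //         using hash_compare_type = ...;              // provides hasher and key_equal
     //         static constexpr bool allow_multimapping = ...;
     //         static const key_type& get_key( const value_type& );
     //     };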
0053 
0054 template<typename Container, typename Value>
0055 class solist_iterator {
0056 private:
0057     using node_ptr = typename Container::value_node_ptr;
0058     template <typename T, typename Allocator>
0059     friend class split_ordered_list;
0060     template<typename M, typename V>
0061     friend class solist_iterator;
0062     template <typename Traits>
0063     friend class concurrent_unordered_base;
0064     template<typename M, typename T, typename U>
0065     friend bool operator==( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
0066     template<typename M, typename T, typename U>
0067     friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
0068 public:
0069     using value_type = Value;
0070     using difference_type = typename Container::difference_type;
0071     using pointer = value_type*;
0072     using reference = value_type&;
0073     using iterator_category = std::forward_iterator_tag;
0074 
0075     solist_iterator() : my_node_ptr(nullptr) {}
0076     solist_iterator( const solist_iterator<Container, typename Container::value_type>& other )
0077         : my_node_ptr(other.my_node_ptr) {}
0078 
0079     solist_iterator& operator=( const solist_iterator<Container, typename Container::value_type>& other ) {
0080         my_node_ptr = other.my_node_ptr;
0081         return *this;
0082     }
0083 
0084     reference operator*() const {
0085         return my_node_ptr->value();
0086     }
0087 
0088     pointer operator->() const {
0089         return my_node_ptr->storage();
0090     }
0091 
0092     solist_iterator& operator++() {
0093         auto next_node = my_node_ptr->next();
0094         while(next_node && next_node->is_dummy()) {
0095             next_node = next_node->next();
0096         }
0097         my_node_ptr = static_cast<node_ptr>(next_node);
0098         return *this;
0099     }
0100 
0101     solist_iterator operator++(int) {
0102         solist_iterator tmp = *this;
0103         ++*this;
0104         return tmp;
0105     }
0106 
0107 private:
0108     solist_iterator( node_ptr pnode ) : my_node_ptr(pnode) {}
0109 
0110     node_ptr get_node_ptr() const { return my_node_ptr; }
0111 
0112     node_ptr my_node_ptr;
0113 };
0114 
0115 template<typename Solist, typename T, typename U>
0116 bool operator==( const solist_iterator<Solist, T>& i, const solist_iterator<Solist, U>& j ) {
0117     return i.my_node_ptr == j.my_node_ptr;
0118 }
0119 
0120 template<typename Solist, typename T, typename U>
0121 bool operator!=( const solist_iterator<Solist, T>& i, const solist_iterator<Solist, U>& j ) {
0122     return i.my_node_ptr != j.my_node_ptr;
0123 }
0124 
0125 template <typename SokeyType>
0126 class list_node {
0127 public:
0128     using node_ptr = list_node*;
0129     using sokey_type = SokeyType;
0130 
0131     list_node(sokey_type key) : my_next(nullptr), my_order_key(key) {}
0132 
0133     void init( sokey_type key ) {
0134         my_order_key = key;
0135     }
0136 
0137     sokey_type order_key() const {
0138         return my_order_key;
0139     }
0140 
0141     bool is_dummy() {
0142         // The least significant bit of the order key is unset for dummy nodes
0143         return (my_order_key & 0x1) == 0;
0144     }
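         // NOTE: the order keys implement a split-ordered list (Shalev & Shavit),
         // assuming split_order_key_regular/split_order_key_dummy (defined later
         // in this file) behave as their names suggest:
         //     regular node: order_key == reverse_bits(hash)   with the LSB set
         //     dummy node:   order_key == reverse_bits(bucket) with the LSB clear
         // Sorting by bit-reversed keys lets the bucket count double without moving
         // nodes: the nodes of bucket b under n buckets are exactly the nodes of
         // buckets b and b+n under 2*n buckets, already adjacent in the list.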
0145 
0146     node_ptr next() const {
0147         return my_next.load(std::memory_order_acquire);
0148     }
0149 
0150     void set_next( node_ptr next_node ) {
0151         my_next.store(next_node, std::memory_order_release);
0152     }
0153 
0154     bool try_set_next( node_ptr expected_next, node_ptr new_next ) {
0155         return my_next.compare_exchange_strong(expected_next, new_next);
0156     }
0157 
0158 private:
0159     std::atomic<node_ptr> my_next;
0160     sokey_type my_order_key;
0161 }; // class list_node
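     // NOTE: my_next uses release stores and acquire loads so that a reader that
     // follows a freshly published link also observes the linked node's initialized
     // fields; try_set_next is the single CAS on which lock-free insertion is built.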
0162 
0163 template <typename ValueType, typename SokeyType>
0164 class value_node : public list_node<SokeyType>
0165 {
0166 public:
0167     using base_type = list_node<SokeyType>;
0168     using sokey_type = typename base_type::sokey_type;
0169     using value_type = ValueType;
0170 
0171     value_node( sokey_type ord_key ) : base_type(ord_key) {}
0172     ~value_node() {}
0173     value_type* storage() {
0174         return reinterpret_cast<value_type*>(&my_value);
0175     }
0176 
0177     value_type& value() {
0178         return *storage();
0179     }
0180 
0181 private:
0182     using aligned_storage_type = typename std::aligned_storage<sizeof(value_type)>::type;
0183     aligned_storage_type my_value;
0184 }; // class value_node
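     // NOTE: the value lives in raw aligned storage rather than a value_type member
     // so a node can be allocated and linked before the value is constructed, and so
     // the value can be destroyed independently of the node (see create_node and
     // destroy_node below).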
0185 
0186 template <typename Traits>
0187 class concurrent_unordered_base {
0188     using self_type = concurrent_unordered_base<Traits>;
0189     using traits_type = Traits;
0190     using hash_compare_type = typename traits_type::hash_compare_type;
0191     class unordered_segment_table;
0192 public:
0193     using value_type = typename traits_type::value_type;
0194     using key_type = typename traits_type::key_type;
0195     using allocator_type = typename traits_type::allocator_type;
0196 
0197 private:
0198     using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
0199     // TODO: check assert conditions for different C++ standards
0200     static_assert(std::is_same<typename allocator_traits_type::value_type, value_type>::value,
0201                   "value_type of the container must be the same as its allocator");
0202     using sokey_type = std::size_t;
0203 
0204 public:
0205     using size_type = std::size_t;
0206     using difference_type = std::ptrdiff_t;
0207 
0208     using iterator = solist_iterator<self_type, value_type>;
0209     using const_iterator = solist_iterator<self_type, const value_type>;
0210     using local_iterator = iterator;
0211     using const_local_iterator = const_iterator;
0212 
0213     using reference = value_type&;
0214     using const_reference = const value_type&;
0215     using pointer = typename allocator_traits_type::pointer;
0216     using const_pointer = typename allocator_traits_type::const_pointer;
0217 
0218     using hasher = typename hash_compare_type::hasher;
0219     using key_equal = typename hash_compare_type::key_equal;
0220 
0221 private:
0222     using list_node_type = list_node<sokey_type>;
0223     using value_node_type = value_node<value_type, sokey_type>;
0224     using node_ptr = list_node_type*;
0225     using value_node_ptr = value_node_type*;
0226 
0227     using value_node_allocator_type = typename allocator_traits_type::template rebind_alloc<value_node_type>;
0228     using node_allocator_type = typename allocator_traits_type::template rebind_alloc<list_node_type>;
0229 
0230     using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>;
0231     using value_node_allocator_traits = tbb::detail::allocator_traits<value_node_allocator_type>;
0232 
0233     static constexpr size_type round_up_to_power_of_two( size_type bucket_count ) {
0234         return size_type(1) << size_type(tbb::detail::log2(uintptr_t(bucket_count == 0 ? 1 : bucket_count) * 2 - 1));
0235     }
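         // NOTE, worked example: round_up_to_power_of_two(5) computes
         // 1 << log2(5 * 2 - 1) == 1 << log2(9) == 1 << 3 == 8, while an exact
         // power of two maps to itself: for 8, 1 << log2(15) == 8.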
0236 
0237     template <typename T>
0238     using is_transparent = dependent_bool<has_transparent_key_equal<key_type, hasher, key_equal>, T>;
0239 public:
0240     using node_type = node_handle<key_type, value_type, value_node_type, allocator_type>;
0241 
0242     explicit concurrent_unordered_base( size_type bucket_count, const hasher& hash = hasher(),
0243                                         const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() )
0244         : my_size(0),
0245           my_bucket_count(round_up_to_power_of_two(bucket_count)),
0246           my_max_load_factor(float(initial_max_load_factor)),
0247           my_hash_compare(hash, equal),
0248           my_head(sokey_type(0)),
0249           my_segments(alloc) {}
0250 
0251     concurrent_unordered_base() : concurrent_unordered_base(initial_bucket_count) {}
0252 
0253     concurrent_unordered_base( size_type bucket_count, const allocator_type& alloc )
0254         : concurrent_unordered_base(bucket_count, hasher(), key_equal(), alloc) {}
0255 
0256     concurrent_unordered_base( size_type bucket_count, const hasher& hash, const allocator_type& alloc )
0257         : concurrent_unordered_base(bucket_count, hash, key_equal(), alloc) {}
0258 
0259     explicit concurrent_unordered_base( const allocator_type& alloc )
0260         : concurrent_unordered_base(initial_bucket_count, hasher(), key_equal(), alloc) {}
0261 
0262     template <typename InputIterator>
0263     concurrent_unordered_base( InputIterator first, InputIterator last,
0264                                size_type bucket_count = initial_bucket_count, const hasher& hash = hasher(),
0265                                const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() )
0266         : concurrent_unordered_base(bucket_count, hash, equal, alloc)
0267     {
0268         insert(first, last);
0269     }
0270 
0271     template <typename InputIterator>
0272     concurrent_unordered_base( InputIterator first, InputIterator last,
0273                                size_type bucket_count, const allocator_type& alloc )
0274         : concurrent_unordered_base(first, last, bucket_count, hasher(), key_equal(), alloc) {}
0275 
0276     template <typename InputIterator>
0277     concurrent_unordered_base( InputIterator first, InputIterator last,
0278                                size_type bucket_count, const hasher& hash, const allocator_type& alloc )
0279         : concurrent_unordered_base(first, last, bucket_count, hash, key_equal(), alloc) {}
0280 
0281     concurrent_unordered_base( const concurrent_unordered_base& other )
0282         : my_size(other.my_size.load(std::memory_order_relaxed)),
0283           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
0284           my_max_load_factor(other.my_max_load_factor),
0285           my_hash_compare(other.my_hash_compare),
0286           my_head(other.my_head.order_key()),
0287           my_segments(other.my_segments)
0288     {
0289         try_call( [&] {
0290             internal_copy(other);
0291         } ).on_exception( [&] {
0292             clear();
0293         });
0294     }
0295 
0296     concurrent_unordered_base( const concurrent_unordered_base& other, const allocator_type& alloc )
0297         : my_size(other.my_size.load(std::memory_order_relaxed)),
0298           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
0299           my_max_load_factor(other.my_max_load_factor),
0300           my_hash_compare(other.my_hash_compare),
0301           my_head(other.my_head.order_key()),
0302           my_segments(other.my_segments, alloc)
0303     {
0304         try_call( [&] {
0305             internal_copy(other);
0306         } ).on_exception( [&] {
0307             clear();
0308         });
0309     }
0310 
0311     concurrent_unordered_base( concurrent_unordered_base&& other )
0312         : my_size(other.my_size.load(std::memory_order_relaxed)),
0313           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
0314           my_max_load_factor(std::move(other.my_max_load_factor)),
0315           my_hash_compare(std::move(other.my_hash_compare)),
0316           my_head(other.my_head.order_key()),
0317           my_segments(std::move(other.my_segments))
0318     {
0319         move_content(std::move(other));
0320     }
0321 
0322     concurrent_unordered_base( concurrent_unordered_base&& other, const allocator_type& alloc )
0323         : my_size(other.my_size.load(std::memory_order_relaxed)),
0324           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
0325           my_max_load_factor(std::move(other.my_max_load_factor)),
0326           my_hash_compare(std::move(other.my_hash_compare)),
0327           my_head(other.my_head.order_key()),
0328           my_segments(std::move(other.my_segments), alloc)
0329     {
0330         using is_always_equal = typename allocator_traits_type::is_always_equal;
0331         internal_move_construct_with_allocator(std::move(other), alloc, is_always_equal());
0332     }
0333 
0334     concurrent_unordered_base( std::initializer_list<value_type> init,
0335                                size_type bucket_count = initial_bucket_count,
0336                                const hasher& hash = hasher(), const key_equal& equal = key_equal(),
0337                                const allocator_type& alloc = allocator_type() )
0338         : concurrent_unordered_base(init.begin(), init.end(), bucket_count, hash, equal, alloc) {}
0339 
0340     concurrent_unordered_base( std::initializer_list<value_type> init,
0341                                size_type bucket_count, const allocator_type& alloc )
0342         : concurrent_unordered_base(init, bucket_count, hasher(), key_equal(), alloc) {}
0343 
0344     concurrent_unordered_base( std::initializer_list<value_type> init,
0345                                size_type bucket_count, const hasher& hash, const allocator_type& alloc )
0346         : concurrent_unordered_base(init, bucket_count, hash, key_equal(), alloc) {}
0347 
0348     ~concurrent_unordered_base() {
0349         internal_clear();
0350     }
0351 
0352     concurrent_unordered_base& operator=( const concurrent_unordered_base& other ) {
0353         if (this != &other) {
0354             clear();
0355             my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
0356             my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
0357             my_max_load_factor = other.my_max_load_factor;
0358             my_hash_compare = other.my_hash_compare;
0359             my_segments = other.my_segments;
0360             internal_copy(other); // TODO: guards for exceptions?
0361         }
0362         return *this;
0363     }
0364 
0365     concurrent_unordered_base& operator=( concurrent_unordered_base&& other ) noexcept(unordered_segment_table::is_noexcept_assignment) {
0366         if (this != &other) {
0367             clear();
0368             my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
0369             my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
0370             my_max_load_factor = std::move(other.my_max_load_factor);
0371             my_hash_compare = std::move(other.my_hash_compare);
0372             my_segments = std::move(other.my_segments);
0373 
0374             using pocma_type = typename allocator_traits_type::propagate_on_container_move_assignment;
0375             using is_always_equal = typename allocator_traits_type::is_always_equal;
0376             internal_move_assign(std::move(other), tbb::detail::disjunction<pocma_type, is_always_equal>());
0377         }
0378         return *this;
0379     }
0380 
0381     concurrent_unordered_base& operator=( std::initializer_list<value_type> init ) {
0382         clear();
0383         insert(init);
0384         return *this;
0385     }
0386 
0387     void swap( concurrent_unordered_base& other ) noexcept(unordered_segment_table::is_noexcept_swap) {
0388         if (this != &other) {
0389             using pocs_type = typename allocator_traits_type::propagate_on_container_swap;
0390             using is_always_equal = typename allocator_traits_type::is_always_equal;
0391             internal_swap(other, tbb::detail::disjunction<pocs_type, is_always_equal>());
0392         }
0393     }
0394 
0395     allocator_type get_allocator() const noexcept { return my_segments.get_allocator(); }
0396 
0397     iterator begin() noexcept { return iterator(first_value_node(&my_head)); }
0398     const_iterator begin() const noexcept { return const_iterator(first_value_node(const_cast<node_ptr>(&my_head))); }
0399     const_iterator cbegin() const noexcept { return const_iterator(first_value_node(const_cast<node_ptr>(&my_head))); }
0400 
0401     iterator end() noexcept { return iterator(nullptr); }
0402     const_iterator end() const noexcept { return const_iterator(nullptr); }
0403     const_iterator cend() const noexcept { return const_iterator(nullptr); }
0404 
0405     __TBB_nodiscard bool empty() const noexcept { return size() == 0; }
0406     size_type size() const noexcept { return my_size.load(std::memory_order_relaxed); }
0407     size_type max_size() const noexcept { return allocator_traits_type::max_size(get_allocator()); }
0408 
0409     void clear() noexcept {
0410         internal_clear();
0411     }
0412 
0413     std::pair<iterator, bool> insert( const value_type& value ) {
0414         return internal_insert_value(value);
0415     }
0416 
0417     std::pair<iterator, bool> insert( value_type&& value ) {
0418         return internal_insert_value(std::move(value));
0419     }
0420 
0421     iterator insert( const_iterator, const value_type& value ) {
0422         // Ignore hint
0423         return insert(value).first;
0424     }
0425 
0426     iterator insert( const_iterator, value_type&& value ) {
0427         // Ignore hint
0428         return insert(std::move(value)).first;
0429     }
0430 
0431     template <typename InputIterator>
0432     void insert( InputIterator first, InputIterator last ) {
0433         for (; first != last; ++first) {
0434             insert(*first);
0435         }
0436     }
0437 
0438     void insert( std::initializer_list<value_type> init ) {
0439         insert(init.begin(), init.end());
0440     }
0441 
0442     std::pair<iterator, bool> insert( node_type&& nh ) {
0443         if (!nh.empty()) {
0444             value_node_ptr insert_node = node_handle_accessor::get_node_ptr(nh);
0445             auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr {
0446                 insert_node->init(order_key);
0447                 return insert_node;
0448             };
0449             auto insert_result = internal_insert(insert_node->value(), init_node);
0450             if (insert_result.inserted) {
0451                 // If the insertion succeeded, set the node handle to the empty state
0452                 __TBB_ASSERT(insert_result.remaining_node == nullptr,
0453                             "internal_insert should not return a remaining node if the insertion succeeded");
0454                 node_handle_accessor::deactivate(nh);
0455             }
0456             return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
0457         }
0458         return {end(), false};
0459     }
0460 
0461     iterator insert( const_iterator, node_type&& nh ) {
0462         // Ignore hint
0463         return insert(std::move(nh)).first;
0464     }
0465 
0466     template <typename... Args>
0467     std::pair<iterator, bool> emplace( Args&&... args ) {
0468         // Create a node with a temporary order_key of 0, which will be reinitialized
0469         // in internal_insert after the hash calculation
0470         value_node_ptr insert_node = create_node(0, std::forward<Args>(args)...);
0471 
0472         auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr {
0473             insert_node->init(order_key);
0474             return insert_node;
0475         };
0476 
0477         auto insert_result = internal_insert(insert_node->value(), init_node);
0478 
0479         if (!insert_result.inserted) {
0480             // If the insertion failed, destroy the node that was created
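                 // NOTE: create_node above used order key 0, whose clear low bit
                 // marks a dummy node; re-initializing with a regular (LSB-set) key
                 // makes destroy_node also destroy the stored value.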
0481             insert_node->init(split_order_key_regular(1));
0482             destroy_node(insert_node);
0483         }
0484 
0485         return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
0486     }
0487 
0488     template <typename... Args>
0489     iterator emplace_hint( const_iterator, Args&&... args ) {
0490         // Ignore hint
0491         return emplace(std::forward<Args>(args)...).first;
0492     }
0493 
0494     iterator unsafe_erase( const_iterator pos ) {
0495         return iterator(first_value_node(internal_erase(pos.get_node_ptr())));
0496     }
0497 
0498     iterator unsafe_erase( iterator pos ) {
0499         return iterator(first_value_node(internal_erase(pos.get_node_ptr())));
0500     }
0501 
0502     iterator unsafe_erase( const_iterator first, const_iterator last ) {
0503         while(first != last) {
0504             first = unsafe_erase(first);
0505         }
0506         return iterator(first.get_node_ptr());
0507     }
0508 
0509     size_type unsafe_erase( const key_type& key ) {
0510         return internal_erase_by_key(key);
0511     }
0512 
0513     template <typename K>
0514     typename std::enable_if<is_transparent<K>::value
0515                             && !std::is_convertible<K, const_iterator>::value
0516                             && !std::is_convertible<K, iterator>::value,
0517                             size_type>::type unsafe_erase( const K& key )
0518     {
0519         return internal_erase_by_key(key);
0520     }
0521 
0522     node_type unsafe_extract( const_iterator pos ) {
0523         internal_extract(pos.get_node_ptr());
0524         return node_handle_accessor::construct<node_type>(pos.get_node_ptr());
0525     }
0526 
0527     node_type unsafe_extract( iterator pos ) {
0528         internal_extract(pos.get_node_ptr());
0529         return node_handle_accessor::construct<node_type>(pos.get_node_ptr());
0530     }
0531 
0532     node_type unsafe_extract( const key_type& key ) {
0533         iterator item = find(key);
0534         return item == end() ? node_type() : unsafe_extract(item);
0535     }
0536 
0537     template <typename K>
0538     typename std::enable_if<is_transparent<K>::value
0539                             && !std::is_convertible<K, const_iterator>::value
0540                             && !std::is_convertible<K, iterator>::value,
0541                             node_type>::type unsafe_extract( const K& key )
0542     {
0543         iterator item = find(key);
0544         return item == end() ? node_type() : unsafe_extract(item);
0545     }
0546 
0547     // Lookup functions
0548     iterator find( const key_type& key ) {
0549         value_node_ptr result = internal_find(key);
0550         return result == nullptr ? end() : iterator(result);
0551     }
0552 
0553     const_iterator find( const key_type& key ) const {
0554         value_node_ptr result = const_cast<self_type*>(this)->internal_find(key);
0555         return result == nullptr ? end() : const_iterator(result);
0556     }
0557 
0558     template <typename K>
0559     typename std::enable_if<is_transparent<K>::value, iterator>::type find( const K& key ) {
0560         value_node_ptr result = internal_find(key);
0561         return result == nullptr ? end() : iterator(result);
0562     }
0563 
0564     template <typename K>
0565     typename std::enable_if<is_transparent<K>::value, const_iterator>::type find( const K& key ) const {
0566         value_node_ptr result = const_cast<self_type*>(this)->internal_find(key);
0567         return result == nullptr ? end() : const_iterator(result);
0568     }
0569 
0570     std::pair<iterator, iterator> equal_range( const key_type& key ) {
0571         auto result = internal_equal_range(key);
0572         return std::make_pair(iterator(result.first), iterator(result.second));
0573     }
0574 
0575     std::pair<const_iterator, const_iterator> equal_range( const key_type& key ) const {
0576         auto result = const_cast<self_type*>(this)->internal_equal_range(key);
0577         return std::make_pair(const_iterator(result.first), const_iterator(result.second));
0578     }
0579 
0580     template <typename K>
0581     typename std::enable_if<is_transparent<K>::value, std::pair<iterator, iterator>>::type equal_range( const K& key ) {
0582         auto result = internal_equal_range(key);
0583         return std::make_pair(iterator(result.first), iterator(result.second));
0584     }
0585 
0586     template <typename K>
0587     typename std::enable_if<is_transparent<K>::value, std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const {
0588         auto result = const_cast<self_type*>(this)->internal_equal_range(key);
0589         return std::make_pair(const_iterator(result.first), const_iterator(result.second));
0590     }
0591 
0592     size_type count( const key_type& key ) const {
0593         return internal_count(key);
0594     }
0595 
0596     template <typename K>
0597     typename std::enable_if<is_transparent<K>::value, size_type>::type count( const K& key ) const {
0598         return internal_count(key);
0599     }
0600 
0601     bool contains( const key_type& key ) const {
0602         return find(key) != end();
0603     }
0604 
0605     template <typename K>
0606     typename std::enable_if<is_transparent<K>::value, bool>::type contains( const K& key ) const {
0607         return find(key) != end();
0608     }
0609 
0610     // Bucket interface
0611     local_iterator unsafe_begin( size_type n ) {
0612         return local_iterator(first_value_node(get_bucket(n)));
0613     }
0614 
0615     const_local_iterator unsafe_begin( size_type n ) const {
0616         auto bucket_begin = first_value_node(const_cast<self_type*>(this)->get_bucket(n));
0617         return const_local_iterator(bucket_begin);
0618     }
0619 
0620     const_local_iterator unsafe_cbegin( size_type n ) const {
0621         auto bucket_begin = first_value_node(const_cast<self_type*>(this)->get_bucket(n));
0622         return const_local_iterator(bucket_begin);
0623     }
0624 
0625     local_iterator unsafe_end( size_type n ) {
0626         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
0627         return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : local_iterator(nullptr);
0628     }
0629 
0630     const_local_iterator unsafe_end( size_type n ) const {
0631         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
0632         return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr);
0633     }
0634 
0635     const_local_iterator unsafe_cend( size_type n ) const {
0636         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
0637         return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr);
0638     }
0639 
0640     size_type unsafe_bucket_count() const { return my_bucket_count.load(std::memory_order_relaxed); }
0641 
0642     size_type unsafe_max_bucket_count() const {
0643         return max_size();
0644     }
0645 
0646     size_type unsafe_bucket_size( size_type n ) const {
0647         return size_type(std::distance(unsafe_begin(n), unsafe_end(n)));
0648     }
0649 
0650     size_type unsafe_bucket( const key_type& key ) const {
0651         return my_hash_compare(key) % my_bucket_count.load(std::memory_order_relaxed);
0652     }
0653 
0654     // Hash policy
0655     float load_factor() const {
0656         return float(size() / float(my_bucket_count.load(std::memory_order_acquire)));
0657     }
0658 
0659     float max_load_factor() const { return my_max_load_factor; }
0660 
0661     void max_load_factor( float mlf ) {
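             // NOTE: mlf != mlf is true only when mlf is NaN, so the check below
             // rejects NaN and negative load factors without requiring <cmath>.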
0662         if (mlf != mlf || mlf < 0) {
0663             tbb::detail::throw_exception(exception_id::invalid_load_factor);
0664         }
0665         my_max_load_factor = mlf;
0666     } // TODO: unsafe?
0667 
0668     void rehash( size_type bucket_count ) {
0669         size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire);
0670         if (current_bucket_count < bucket_count) {
0671             // TODO: do we need do-while here?
0672             my_bucket_count.compare_exchange_strong(current_bucket_count, round_up_to_power_of_two(bucket_count));
0673         }
0674     }
0675 
0676     void reserve( size_type elements_count ) {
0677         size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire);
0678         size_type necessary_bucket_count = current_bucket_count;
0679 
0680         // max_load_factor() is currently unsafe, so we can assume that my_max_load_factor
0681         // will not change during the calculation
0682         // TODO: Log2 seems useful here
0683         while (necessary_bucket_count * max_load_factor() < elements_count) {
0684             necessary_bucket_count <<= 1;
0685         }
0686 
0687         while (!my_bucket_count.compare_exchange_strong(current_bucket_count, necessary_bucket_count)) {
0688             if (current_bucket_count >= necessary_bucket_count)
0689                 break;
0690         }
0691     }
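         // NOTE, worked example: with 8 buckets and max_load_factor() == 4,
         // reserve(100) grows the candidate count 8 -> 16 -> 32 (32 * 4 >= 100),
         // then publishes 32 through the CAS loop, giving up only if another
         // thread has already installed a bucket count of at least 32.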
0692 
0693     // Observers
0694     hasher hash_function() const { return my_hash_compare.hash_function(); }
0695     key_equal key_eq() const { return my_hash_compare.key_eq(); }
0696 
0697     class const_range_type {
0698     private:
0699         const concurrent_unordered_base& my_instance;
0700         node_ptr my_begin_node; // may be node* const
0701         node_ptr my_end_node;
0702         mutable node_ptr my_midpoint_node;
0703     public:
0704         using size_type = typename concurrent_unordered_base::size_type;
0705         using value_type = typename concurrent_unordered_base::value_type;
0706         using reference = typename concurrent_unordered_base::reference;
0707         using difference_type = typename concurrent_unordered_base::difference_type;
0708         using iterator = typename concurrent_unordered_base::const_iterator;
0709 
0710         bool empty() const { return my_begin_node == my_end_node; }
0711 
0712         bool is_divisible() const {
0713             return my_midpoint_node != my_end_node;
0714         }
0715 
0716         size_type grainsize() const { return 1; }
0717 
0718         const_range_type( const_range_type& range, split )
0719             : my_instance(range.my_instance),
0720               my_begin_node(range.my_midpoint_node),
0721               my_end_node(range.my_end_node)
0722         {
0723             range.my_end_node = my_begin_node;
0724             __TBB_ASSERT(!empty(), "Splitting even though the range is not divisible");
0725             __TBB_ASSERT(!range.empty(), "Splitting even though the range is not divisible");
0726             set_midpoint();
0727             range.set_midpoint();
0728         }
0729 
0730         iterator begin() const { return iterator(my_instance.first_value_node(my_begin_node)); }
0731         iterator end() const { return iterator(my_instance.first_value_node(my_end_node)); }
0732 
0733         const_range_type( const concurrent_unordered_base& table )
0734             : my_instance(table), my_begin_node(my_instance.first_value_node(const_cast<node_ptr>(&table.my_head))), my_end_node(nullptr)
0735         {
0736             set_midpoint();
0737         }
0738     private:
0739         void set_midpoint() const {
0740             if (empty()) {
0741                 my_midpoint_node = my_end_node;
0742             } else {
0743                 sokey_type invalid_key = ~sokey_type(0);
0744                 sokey_type begin_key = my_begin_node != nullptr ? my_begin_node->order_key() : invalid_key;
0745                 sokey_type end_key = my_end_node != nullptr ? my_end_node->order_key() : invalid_key;
0746 
0747                 size_type mid_bucket = reverse_bits(begin_key + (end_key - begin_key) / 2) %
0748                     my_instance.my_bucket_count.load(std::memory_order_relaxed);
0749                 while( my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed) == nullptr) {
0750                     mid_bucket = my_instance.get_parent(mid_bucket);
0751                 }
0752                 if (reverse_bits(mid_bucket) > begin_key) {
0753                     // Found a dummy node between begin and end
0754                     my_midpoint_node = my_instance.first_value_node(
0755                         my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed));
0756                 } else {
0757                     // Didn't find a dummy node between begin and end
0758                     my_midpoint_node = my_end_node;
0759                 }
0760             }
0761         }
0762     }; // class const_range_type
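         // NOTE: set_midpoint picks the bucket whose order key lies halfway between
         // the begin and end keys and uses that bucket's dummy node as the split
         // point, so splitting costs O(1) hops instead of walking half the range;
         // if no initialized dummy node lies strictly between the bounds, the range
         // reports itself as not divisible.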
0763 
0764     class range_type : public const_range_type {
0765     public:
0766         using iterator = typename concurrent_unordered_base::iterator;
0767         using const_range_type::const_range_type;
0768 
0769         iterator begin() const { return iterator(const_range_type::begin().get_node_ptr()); }
0770         iterator end() const { return iterator(const_range_type::end().get_node_ptr()); }
0771     }; // class range_type
0772 
0773     // Parallel iteration
0774     range_type range() {
0775         return range_type(*this);
0776     }
0777 
0778     const_range_type range() const {
0779         return const_range_type(*this);
0780     }
0781 protected:
0782     static constexpr bool allow_multimapping = traits_type::allow_multimapping;
0783 
0784 private:
0785     static constexpr size_type initial_bucket_count = 8;
0786     static constexpr float initial_max_load_factor = 4; // TODO: consider 1?
0787     static constexpr size_type pointers_per_embedded_table = sizeof(size_type) * 8 - 1;
0788 
0789     class unordered_segment_table
0790         : public segment_table<std::atomic<node_ptr>, allocator_type, unordered_segment_table, pointers_per_embedded_table>
0791     {
0792         using self_type = unordered_segment_table;
0793         using atomic_node_ptr = std::atomic<node_ptr>;
0794         using base_type = segment_table<std::atomic<node_ptr>, allocator_type, unordered_segment_table, pointers_per_embedded_table>;
0795         using segment_type = typename base_type::segment_type;
0796         using base_allocator_type = typename base_type::allocator_type;
0797 
0798         using segment_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_node_ptr>;
0799         using segment_allocator_traits = tbb::detail::allocator_traits<segment_allocator_type>;
0800     public:
0801         // Segment table for unordered containers should not be extended in the wait-free implementation
0802         static constexpr bool allow_table_extending = false;
0803         static constexpr bool is_noexcept_assignment = std::is_nothrow_move_assignable<hasher>::value &&
0804                                                        std::is_nothrow_move_assignable<key_equal>::value &&
0805                                                        segment_allocator_traits::is_always_equal::value;
0806         static constexpr bool is_noexcept_swap = tbb::detail::is_nothrow_swappable<hasher>::value &&
0807                                                  tbb::detail::is_nothrow_swappable<key_equal>::value &&
0808                                                  segment_allocator_traits::is_always_equal::value;
0809 
0810         // TODO: using base_type::base_type does not compile on Windows and the Intel Compiler - investigate
0811         unordered_segment_table( const base_allocator_type& alloc = base_allocator_type() )
0812             : base_type(alloc) {}
0813 
0814         unordered_segment_table( const unordered_segment_table& ) = default;
0815 
0816         unordered_segment_table( const unordered_segment_table& other, const base_allocator_type& alloc )
0817             : base_type(other, alloc) {}
0818 
0819         unordered_segment_table( unordered_segment_table&& ) = default;
0820 
0821         unordered_segment_table( unordered_segment_table&& other, const base_allocator_type& alloc )
0822             : base_type(std::move(other), alloc) {}
0823 
0824         unordered_segment_table& operator=( const unordered_segment_table& ) = default;
0825 
0826         unordered_segment_table& operator=( unordered_segment_table&& ) = default;
0827 
0828         segment_type create_segment( typename base_type::segment_table_type, typename base_type::segment_index_type segment_index, size_type ) {
0829             segment_allocator_type alloc(this->get_allocator());
0830             size_type seg_size = this->segment_size(segment_index);
0831             segment_type new_segment = segment_allocator_traits::allocate(alloc, seg_size);
0832             for (size_type i = 0; i != seg_size; ++i) {
0833                 segment_allocator_traits::construct(alloc, new_segment + i, nullptr);
0834             }
0835             return new_segment;
0836         }
0837 
0838         segment_type nullify_segment( typename base_type::segment_table_type table, size_type segment_index ) {
0839             segment_type target_segment = table[segment_index].load(std::memory_order_relaxed);
0840             table[segment_index].store(nullptr, std::memory_order_relaxed);
0841             return target_segment;
0842         }
0843 
0844         // deallocate_segment is required by the segment_table base class; for
0845         // unordered containers the destructors must also be run during deallocation
0846         void deallocate_segment( segment_type address, size_type index ) {
0847             destroy_segment(address, index);
0848         }
0849 
0850         void destroy_segment( segment_type address, size_type index ) {
0851             segment_allocator_type alloc(this->get_allocator());
0852             for (size_type i = 0; i != this->segment_size(index); ++i) {
0853                 segment_allocator_traits::destroy(alloc, address + i);
0854             }
0855             segment_allocator_traits::deallocate(alloc, address, this->segment_size(index));
0856         }
0857 
0858 
0859         void copy_segment( size_type index, segment_type, segment_type to ) {
0860             if (index == 0) {
0861                 // The first element in the first segment is embedded into the table (my_head)
0862                 // so the first pointer should not be stored here
0863                 // It would be stored during move ctor/assignment operation
0864                 to[1].store(nullptr, std::memory_order_relaxed);
0865             } else {
0866                 for (size_type i = 0; i != this->segment_size(index); ++i) {
0867                     to[i].store(nullptr, std::memory_order_relaxed);
0868                 }
0869             }
0870         }
0871 
0872         void move_segment( size_type index, segment_type from, segment_type to ) {
0873             if (index == 0) {
0874                 // The first element in the first segment is embedded into the table (my_head)
0875                 // so the first pointer should not be stored here
0876                 // It would be stored during move ctor/assignment operation
0877                 to[1].store(from[1].load(std::memory_order_relaxed), std::memory_order_relaxed);
0878             } else {
0879                 for (size_type i = 0; i != this->segment_size(index); ++i) {
0880                     to[i].store(from[i].load(std::memory_order_relaxed), std::memory_order_relaxed);
0881                     from[i].store(nullptr, std::memory_order_relaxed);
0882                 }
0883             }
0884         }
0885 
0886         // allocate_long_table is required by the segment_table base class, but unused for unordered containers
0887         typename base_type::segment_table_type allocate_long_table( const typename base_type::atomic_segment*, size_type ) {
0888             __TBB_ASSERT(false, "This method should never be called");
0889             // TableType is a pointer
0890             return nullptr;
0891         }
0892 
0893         // destroy_elements is required by the segment_table base class, but unused for unordered containers
0894         // it is called but does nothing
0895         void destroy_elements() {}
0896     }; // class unordered_segment_table
0897 
0898     void internal_clear() {
0899         // TODO: consider usefulness of two versions of clear() - with dummy nodes deallocation and without it
0900         node_ptr next = my_head.next();
0901         node_ptr curr = next;
0902 
0903         my_head.set_next(nullptr);
0904 
0905         while (curr != nullptr) {
0906             next = curr->next();
0907             destroy_node(curr);
0908             curr = next;
0909         }
0910 
0911         my_size.store(0, std::memory_order_relaxed);
0912         my_segments.clear();
0913     }
0914 
0915     void destroy_node( node_ptr node ) {
0916         if (node->is_dummy()) {
0917             node_allocator_type dummy_node_allocator(my_segments.get_allocator());
0918             // Destroy the node
0919             node_allocator_traits::destroy(dummy_node_allocator, node);
0920             // Deallocate the memory
0921             node_allocator_traits::deallocate(dummy_node_allocator, node, 1);
0922         } else {
0923             // GCC 11.1 issues a warning here that an incorrect destructor might be called for dummy nodes
0924             #if (__TBB_GCC_VERSION >= 110100 && __TBB_GCC_VERSION < 140000 ) && !__clang__ && !__INTEL_COMPILER
0925             volatile
0926             #endif
0927             value_node_ptr val_node = static_cast<value_node_ptr>(node);
0928             value_node_allocator_type value_node_allocator(my_segments.get_allocator());
0929             // Destroy the value
0930             value_node_allocator_traits::destroy(value_node_allocator, val_node->storage());
0931             // Destroy the node
0932             value_node_allocator_traits::destroy(value_node_allocator, val_node);
0933             // Deallocate the memory
0934             value_node_allocator_traits::deallocate(value_node_allocator, val_node, 1);
0935         }
0936     }
0937 
0938     struct internal_insert_return_type {
0939         // If the insertion failed, remaining_node points to the node that failed
0940         // to insert (such a node may have been allocated during the insertion)
0941         value_node_ptr remaining_node;
0942         // If the insertion failed, node_with_equal_key points to the node in the list whose
0943         // key is equivalent to the inserted one; otherwise it points to the node that was inserted
0944         value_node_ptr node_with_equal_key;
0945         // Insertion status
0946         // NOTE: if inserted is true, remaining_node must be nullptr
0947         bool inserted;
0948     }; // struct internal_insert_return_type
0949 
0950     // Inserts the value into the split ordered list
0951     template <typename ValueType>
0952     std::pair<iterator, bool> internal_insert_value( ValueType&& value ) {
0953 
0954         auto create_value_node = [&value, this]( sokey_type order_key )->value_node_ptr {
0955             return create_node(order_key, std::forward<ValueType>(value));
0956         };
0957 
0958         auto insert_result = internal_insert(value, create_value_node);
0959 
0960         if (insert_result.remaining_node != nullptr) {
0961             // If the insertion failed, destroy the node that failed to insert, if one was created
0962             __TBB_ASSERT(!insert_result.inserted,
0963                          "remaining_node should be nullptr if the node was successfully inserted");
0964             destroy_node(insert_result.remaining_node);
0965         }
0966 
0967         return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
0968     }
0969 
0970     // Inserts the node into the split ordered list
0971     // Creates a node using the specified callback once the place for insertion has been found
0972     // Returns an internal_insert_return_type object, where:
0973     //     - If the insertion succeeded:
0974     //         - remaining_node is nullptr
0975     //         - node_with_equal_key points to the inserted node
0976     //         - inserted is true
0977     //     - If the insertion failed:
0978     //         - remaining_node points to the node that failed to insert, if it was created;
0979     //           nullptr if the node was not created because the requested key was already
0980     //           present in the list
0981     //         - node_with_equal_key points to the element in the list whose key is
0982     //           equivalent to the requested key
0983     //         - inserted is false
0984     template <typename ValueType, typename CreateInsertNode>
0985     internal_insert_return_type internal_insert( ValueType&& value, CreateInsertNode create_insert_node ) {
0986         static_assert(std::is_same<typename std::decay<ValueType>::type, value_type>::value,
0987                       "Incorrect type in internal_insert");
0988         const key_type& key = traits_type::get_key(value);
0989         sokey_type hash_key = sokey_type(my_hash_compare(key));
0990 
0991         sokey_type order_key = split_order_key_regular(hash_key);
0992         node_ptr prev = prepare_bucket(hash_key);
0993         __TBB_ASSERT(prev != nullptr, "Invalid head node");
0994 
0995         auto search_result = search_after(prev, order_key, key);
0996 
0997         if (search_result.second) {
0998             return internal_insert_return_type{ nullptr, search_result.first, false };
0999         }
1000 
1001         value_node_ptr new_node = create_insert_node(order_key);
1002         node_ptr curr = search_result.first;
1003 
1004         while (!try_insert(prev, new_node, curr)) {
1005             search_result = search_after(prev, order_key, key);
1006             if (search_result.second) {
1007                 return internal_insert_return_type{ new_node, search_result.first, false };
1008             }
1009             curr = search_result.first;
1010         }
1011 
1012         auto sz = my_size.fetch_add(1);
1013         adjust_table_size(sz + 1, my_bucket_count.load(std::memory_order_acquire));
1014         return internal_insert_return_type{ nullptr, static_cast<value_node_ptr>(new_node), true };
1015     }
1016 
1017     // Searches for a node with a key equivalent to key and the requested order key, starting after the node prev
1018     // Returns the existing node and true if such a node is already in the list
1019     // Returns the first node with an order key greater than the requested one and false if the node is not present in the list
1020     std::pair<value_node_ptr, bool> search_after( node_ptr& prev, sokey_type order_key, const key_type& key ) {
1021         // NOTE: static_cast<value_node_ptr>(curr) should be done only after we have
1022         // ensured that the node is not a dummy node
1023 
1024         node_ptr curr = prev->next();
1025 
1026         while (curr != nullptr && (curr->order_key() < order_key ||
1027                (curr->order_key() == order_key && !my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key))))
1028         {
1029             prev = curr;
1030             curr = curr->next();
1031         }
1032 
1033         if (curr != nullptr && curr->order_key() == order_key && !allow_multimapping) {
1034             return { static_cast<value_node_ptr>(curr), true };
1035         }
1036         return { static_cast<value_node_ptr>(curr), false };
1037     }
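         // NOTE: when allow_multimapping is set, the early "found" return above is
         // disabled, so a duplicate is linked in right before the first node with an
         // equal key, which keeps equal keys contiguous in the list.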
1038 
1039     void adjust_table_size( size_type total_elements, size_type current_size ) {
1040         // Grow the table by a factor of 2 if possible and needed
1041         if ( (float(total_elements) / float(current_size)) > my_max_load_factor ) {
1042             // Double the bucket count only if it has not changed between the loads
1043             my_bucket_count.compare_exchange_strong(current_size, 2u * current_size);
1044         }
1045     }
1046 
1047     node_ptr insert_dummy_node( node_ptr parent_dummy_node, sokey_type order_key ) {
1048         node_ptr prev_node = parent_dummy_node;
1049 
1050         node_ptr dummy_node = create_dummy_node(order_key);
1051         node_ptr next_node;
1052 
1053         do {
1054             next_node = prev_node->next();
1055             // Move forward through the list while the order key is less than requested
1056             while (next_node != nullptr && next_node->order_key() < order_key) {
1057                 prev_node = next_node;
1058                 next_node = next_node->next();
1059             }
1060 
1061             if (next_node != nullptr && next_node->order_key() == order_key) {
1062                 // Another dummy node with the same order key was inserted by another thread
1063                 // Destroy the node and exit
1064                 destroy_node(dummy_node);
1065                 return next_node;
1066             }
1067         } while (!try_insert(prev_node, dummy_node, next_node));
1068 
1069         return dummy_node;
1070     }
1071 
1072     // Tries to insert a node between prev_node and the expected next node
1073     // Returns false if the actual next node is not equal to the expected one
1074     static bool try_insert( node_ptr prev_node, node_ptr new_node, node_ptr current_next_node ) {
1075         new_node->set_next(current_next_node);
1076         return prev_node->try_set_next(current_next_node, new_node);
1077     }
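         // NOTE: this is the classic lock-free list insertion - the new node is
         // linked to the expected successor first, then published with a single CAS
         // on the predecessor; on CAS failure the caller re-searches and retries.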
1078 
1079     // Returns the bucket associated with the hash_key
1080     node_ptr prepare_bucket( sokey_type hash_key ) {
1081         size_type bucket = hash_key % my_bucket_count.load(std::memory_order_acquire);
1082         return get_bucket(bucket);
1083     }
1084 
1085     // Initialize the corresponding bucket if it is not initialized
1086     node_ptr get_bucket( size_type bucket_index ) {
1087         if (my_segments[bucket_index].load(std::memory_order_acquire) == nullptr) {
1088             init_bucket(bucket_index);
1089         }
1090         return my_segments[bucket_index].load(std::memory_order_acquire);
1091     }
1092 
1093     void init_bucket( size_type bucket ) {
1094         if (bucket == 0) {
1095             // Atomically store the address of my_head as the first bucket
1096             node_ptr disabled = nullptr;
1097             my_segments[0].compare_exchange_strong(disabled, &my_head);
1098             return;
1099         }
1100 
1101         size_type parent_bucket = get_parent(bucket);
1102 
1103         while (my_segments[parent_bucket].load(std::memory_order_acquire) == nullptr) {
1104             // Initialize all of the parent buckets
1105             init_bucket(parent_bucket);
1106         }
1107 
1108         __TBB_ASSERT(my_segments[parent_bucket].load(std::memory_order_acquire) != nullptr, "Parent bucket should be initialized");
1109         node_ptr parent = my_segments[parent_bucket].load(std::memory_order_acquire);
1110 
1111         // Insert dummy node into the list
1112         node_ptr dummy_node = insert_dummy_node(parent, split_order_key_dummy(bucket));
1113         // TODO: consider returning pair<node_ptr, bool> to avoid the store operation if the bucket was stored by another thread
1114         // or move store to insert_dummy_node
1115         // Add dummy_node into the segment table
1116         my_segments[bucket].store(dummy_node, std::memory_order_release);
1117     }
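         // NOTE: buckets are initialized lazily along a binary parent chain;
         // assuming get_parent (defined later in this file) clears the highest set
         // bit, initializing bucket 6 (0b110) may first initialize bucket 2 (0b010),
         // whose dummy node then bounds the ordered search for bucket 6's own dummy.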
1118 
1119     node_ptr create_dummy_node( sokey_type order_key ) {
1120         node_allocator_type dummy_node_allocator(my_segments.get_allocator());
1121         node_ptr dummy_node = node_allocator_traits::allocate(dummy_node_allocator, 1);
1122         node_allocator_traits::construct(dummy_node_allocator, dummy_node, order_key);
1123         return dummy_node;
1124     }
1125 
1126     template <typename... Args>
1127     value_node_ptr create_node( sokey_type order_key, Args&&... args ) {
1128         value_node_allocator_type value_node_allocator(my_segments.get_allocator());
1129         // Allocate memory for the value_node
1130         value_node_ptr new_node = value_node_allocator_traits::allocate(value_node_allocator, 1);
1131         // Construct the node
1132         value_node_allocator_traits::construct(value_node_allocator, new_node, order_key);
1133 
1134         // try_call API is not convenient here due to broken
1135         // variadic capture on GCC 4.8.5
1136         auto value_guard = make_raii_guard([&] {
1137             value_node_allocator_traits::destroy(value_node_allocator, new_node);
1138             value_node_allocator_traits::deallocate(value_node_allocator, new_node, 1);
1139         });
1140 
1141         // Construct the value in the node
1142         value_node_allocator_traits::construct(value_node_allocator, new_node->storage(), std::forward<Args>(args)...);
1143         value_guard.dismiss();
1144         return new_node;
1145     }
1146 
1147     value_node_ptr first_value_node( node_ptr first_node ) const {
1148         while (first_node != nullptr && first_node->is_dummy()) {
1149             first_node = first_node->next();
1150         }
1151         return static_cast<value_node_ptr>(first_node);
1152     }
1153 
1154     // Unsafe method that removes the node from the list and returns the next node
1155     node_ptr internal_erase( value_node_ptr node_to_erase ) {
1156         __TBB_ASSERT(node_to_erase != nullptr, "Invalid iterator for erase");
1157         node_ptr next_node = node_to_erase->next();
1158         internal_extract(node_to_erase);
1159         destroy_node(node_to_erase);
1160         return next_node;
1161     }
1162 
1163     template <typename K>
1164     size_type internal_erase_by_key( const K& key ) {
1165         // TODO: consider reimplementation without equal_range - it is not efficient to perform a lookup over a bucket
1166         // for each unsafe_erase call
1167         auto eq_range = equal_range(key);
1168         size_type erased_count = 0;
1169 
1170         for (auto it = eq_range.first; it != eq_range.second;) {
1171             it = unsafe_erase(it);
1172             ++erased_count;
1173         }
1174         return erased_count;
1175     }
1176 
1177     // Unsafe method that extracts the node from the list
1178     void internal_extract( value_node_ptr node_to_extract ) {
1179         const key_type& key = traits_type::get_key(node_to_extract->value());
1180         sokey_type hash_key = sokey_type(my_hash_compare(key));
1181 
1182         node_ptr prev_node = prepare_bucket(hash_key);
1183 
1184         for (node_ptr node = prev_node->next(); node != nullptr; prev_node = node, node = node->next()) {
1185             if (node == node_to_extract) {
1186                 unlink_node(prev_node, node, node_to_extract->next());
1187                 my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
1188                 return;
1189             }
1190             __TBB_ASSERT(node->order_key() <= node_to_extract->order_key(),
1191                          "the node to be extracted should be present in the list");
1192         }
1193     }

protected:
    template <typename SourceType>
    void internal_merge( SourceType&& source ) {
        static_assert(std::is_same<node_type, typename std::decay<SourceType>::type::node_type>::value,
                      "Incompatible containers cannot be merged");

        for (node_ptr source_prev = &source.my_head; source_prev->next() != nullptr;) {
            if (!source_prev->next()->is_dummy()) {
                value_node_ptr curr = static_cast<value_node_ptr>(source_prev->next());
                // If multimapping is allowed, or the key is not present
                // in *this container - extract the node from the source list
                if (allow_multimapping || !contains(traits_type::get_key(curr->value()))) {
                    node_ptr next_node = curr->next();
                    source.unlink_node(source_prev, curr, next_node);

                    // Remember the old order key
                    sokey_type old_order_key = curr->order_key();

                    // A node handle holding curr cannot be used directly in the insert call,
                    // because the destructor of node_type would destroy curr
                    node_type curr_node = node_handle_accessor::construct<node_type>(curr);

                    // If the insertion fails, return ownership of the node to the source
                    if (!insert(std::move(curr_node)).second) {
                        __TBB_ASSERT(!allow_multimapping, "Insertion should always succeed for the multicontainer");
                        __TBB_ASSERT(source_prev->next() == next_node,
                                     "Concurrent operations with the source container in merge are prohibited");

                        // Reinitialize the node with the old order key, because the order key
                        // can change during the insertion
                        curr->init(old_order_key);
                        __TBB_ASSERT(old_order_key >= source_prev->order_key() &&
                                     (next_node == nullptr || old_order_key <= next_node->order_key()),
                                     "Wrong node order in the source container");
                        // Merge is unsafe for the source container, so the insertion back can be done without compare_exchange
                        curr->set_next(next_node);
                        source_prev->set_next(curr);
                        source_prev = curr;
                        node_handle_accessor::deactivate(curr_node);
                    } else {
                        source.my_size.fetch_sub(1, std::memory_order_relaxed);
                    }
                } else {
                    source_prev = curr;
                }
            } else {
                source_prev = source_prev->next();
            }
        }
    }
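    // Usage sketch (illustrative): merge() splices nodes between containers
    // without copying elements, mirroring std::unordered_map::merge. For
    // hypothetical maps of the same node type:
    //
    //   tbb::concurrent_unordered_map<int, std::string> m1, m2;
    //   m1.emplace(1, "one");
    //   m2.emplace(1, "uno");  // duplicate key
    //   m2.emplace(2, "two");
    //   m1.merge(m2);          // {2,"two"} moves to m1; {1,"uno"} stays in m2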

private:
    // Unsafe method that unlinks the node between prev and next
    void unlink_node( node_ptr prev_node, node_ptr node_to_unlink, node_ptr next_node ) {
        __TBB_ASSERT(prev_node->next() == node_to_unlink &&
                     node_to_unlink->next() == next_node,
                     "Erasing and extracting nodes from the container is unsafe in concurrent mode");
        prev_node->set_next(next_node);
        node_to_unlink->set_next(nullptr);
    }

    template <typename K>
    value_node_ptr internal_find( const K& key ) {
        sokey_type hash_key = sokey_type(my_hash_compare(key));
        sokey_type order_key = split_order_key_regular(hash_key);

        node_ptr curr = prepare_bucket(hash_key);

        while (curr != nullptr) {
            if (curr->order_key() > order_key) {
                // If the order key is greater than the requested order key,
                // the element is not in the hash table
                return nullptr;
            } else if (curr->order_key() == order_key &&
                       my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key)) {
                // The fact that order keys match does not mean that the element is found.
                // Key comparison has to be performed to check whether this is the
                // right element. If not, keep searching while the order key is the same.
                return static_cast<value_node_ptr>(curr);
            }
            curr = curr->next();
        }

        return nullptr;
    }

    template <typename K>
    std::pair<value_node_ptr, value_node_ptr> internal_equal_range( const K& key ) {
        sokey_type hash_key = sokey_type(my_hash_compare(key));
        sokey_type order_key = split_order_key_regular(hash_key);

        node_ptr curr = prepare_bucket(hash_key);

        while (curr != nullptr) {
            if (curr->order_key() > order_key) {
                // If the order key is greater than the requested order key,
                // the element is not in the hash table
                return {nullptr, nullptr};
            } else if (curr->order_key() == order_key &&
                       my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key)) {
                value_node_ptr first = static_cast<value_node_ptr>(curr);
                node_ptr last = first;
                do {
                    last = last->next();
                } while (allow_multimapping && last != nullptr && !last->is_dummy() &&
                        my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(last)->value()), key));
                return std::make_pair(first, first_value_node(last));
            }
            curr = curr->next();
        }
        return {nullptr, nullptr};
    }
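    // Usage sketch (illustrative): for a hypothetical multiset holding
    // {3, 3, 8}, equal_range(3) returns a pair of iterators spanning both 3s;
    // the do/while above advances past every matching node, and
    // first_value_node() then skips any dummy nodes at the boundary so the
    // second iterator lands on the next real element (or the end of the list).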

    template <typename K>
    size_type internal_count( const K& key ) const {
        if (allow_multimapping) {
            // TODO: consider reimplementing internal_equal_range with element counting to avoid std::distance
            auto eq_range = equal_range(key);
            return std::distance(eq_range.first, eq_range.second);
        } else {
            return contains(key) ? 1 : 0;
        }
    }
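    // Illustrative note: for a hypothetical multimap holding keys {1, 1, 2},
    // count(1) == 2 and costs a walk over the matching range; for the
    // unique-key containers count() can only be 0 or 1, so a contains()
    // lookup suffices.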

    void internal_copy( const concurrent_unordered_base& other ) {
        node_ptr last_node = &my_head;
        my_segments[0].store(&my_head, std::memory_order_relaxed);

        for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) {
            node_ptr new_node;
            if (!node->is_dummy()) {
                // The node in the source table contains a value
                new_node = create_node(node->order_key(), static_cast<value_node_ptr>(node)->value());
            } else {
                // The node in the source table is a dummy node
                new_node = create_dummy_node(node->order_key());
                my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed);
            }

            last_node->set_next(new_node);
            last_node = new_node;
        }
    }

    void internal_move( concurrent_unordered_base&& other ) {
        node_ptr last_node = &my_head;
        my_segments[0].store(&my_head, std::memory_order_relaxed);

        for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) {
            node_ptr new_node;
            if (!node->is_dummy()) {
                // The node in the source table contains a value
                new_node = create_node(node->order_key(), std::move(static_cast<value_node_ptr>(node)->value()));
            } else {
                // TODO: do we need to destroy a dummy node in the source container?
                // The node in the source table is a dummy node
                new_node = create_dummy_node(node->order_key());
                my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed);
            }

            last_node->set_next(new_node);
            last_node = new_node;
        }
    }

    void move_content( concurrent_unordered_base&& other ) {
        // NOTE: allocators should be equal
        my_head.set_next(other.my_head.next());
        other.my_head.set_next(nullptr);
        my_segments[0].store(&my_head, std::memory_order_relaxed);

        other.my_bucket_count.store(initial_bucket_count, std::memory_order_relaxed);
        other.my_max_load_factor = initial_max_load_factor;
        other.my_size.store(0, std::memory_order_relaxed);
    }

    void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type&,
                                                 /*is_always_equal = */std::true_type ) {
        // Allocators are always equal - no need to compare for equality
        move_content(std::move(other));
    }

    void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type& alloc,
                                                 /*is_always_equal = */std::false_type ) {
        // Allocators are not always equal
        if (alloc == other.my_segments.get_allocator()) {
            move_content(std::move(other));
        } else {
            try_call( [&] {
                internal_move(std::move(other));
            } ).on_exception( [&] {
                clear();
            });
        }
    }
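    // Illustrative note: when the allocators differ, nodes cannot simply be
    // re-linked - every element has to be moved into memory obtained from the
    // new allocator (internal_move above), which is O(n) and may throw; the
    // try_call/on_exception guard then clears the partially built table.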

    // Move-assigns the hash table from other if any two instances of allocator_type always compare equal
    // or propagate_on_container_move_assignment is true
    void internal_move_assign( concurrent_unordered_base&& other, /*is_always_equal || POCMA = */std::true_type ) {
        move_content(std::move(other));
    }

    // Move-assigns the hash table from other if instances of allocator_type do not always compare equal
    // and propagate_on_container_move_assignment is false
    void internal_move_assign( concurrent_unordered_base&& other, /*is_always_equal || POCMA = */std::false_type ) {
        if (my_segments.get_allocator() == other.my_segments.get_allocator()) {
            move_content(std::move(other));
        } else {
            // TODO: guards for exceptions
            internal_move(std::move(other));
        }
    }

    void internal_swap( concurrent_unordered_base& other, /*is_always_equal || POCS = */std::true_type ) {
        internal_swap_fields(other);
    }

    void internal_swap( concurrent_unordered_base& other, /*is_always_equal || POCS = */std::false_type ) {
        __TBB_ASSERT(my_segments.get_allocator() == other.my_segments.get_allocator(),
                     "Swapping with unequal allocators is not allowed");
        internal_swap_fields(other);
    }

    void internal_swap_fields( concurrent_unordered_base& other ) {
        node_ptr first_node = my_head.next();
        my_head.set_next(other.my_head.next());
        other.my_head.set_next(first_node);

        size_type current_size = my_size.load(std::memory_order_relaxed);
        my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
        other.my_size.store(current_size, std::memory_order_relaxed);

        size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
        my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
        other.my_bucket_count.store(bucket_count, std::memory_order_relaxed);

        using std::swap;
        swap(my_max_load_factor, other.my_max_load_factor);
        swap(my_hash_compare, other.my_hash_compare);
        my_segments.swap(other.my_segments);

        // The segment table's swap() exchanges all of the segments, including the first one,
        // so the first segment must be restored to point back to my_head. Without this,
        // the first segment of each container would point to the other container's head.
        my_segments[0].store(&my_head, std::memory_order_relaxed);
        other.my_segments[0].store(&other.my_head, std::memory_order_relaxed);
    }

    // A regular order key has its original hash value reversed and the last bit set
    static constexpr sokey_type split_order_key_regular( sokey_type hash ) {
        return reverse_bits(hash) | 0x1;
    }

    // A dummy order key has its original hash value reversed and the last bit unset
    static constexpr sokey_type split_order_key_dummy( sokey_type hash ) {
        return reverse_bits(hash) & ~sokey_type(0x1);
    }
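    // Worked example (illustrative, using 8-bit keys for brevity): a hash of
    // 0b00000110 reverses to 0b01100000. The bucket's dummy key is 0b01100000
    // (last bit unset) and the regular key is 0b01100001 (last bit set), so in
    // the split-ordered list a dummy node sorts immediately before the value
    // nodes that hash into its bucket.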

    size_type get_parent( size_type bucket ) const {
        // Clear the most significant set bit of the bucket index
        __TBB_ASSERT(bucket != 0, "Unable to get_parent of bucket 0");
        size_type msb = tbb::detail::log2(bucket);
        return bucket & ~(size_type(1) << msb);
    }
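    // Worked example (illustrative): for bucket 6 (0b110) the most significant
    // set bit is at position 2, so the parent is 6 & ~(1 << 2) == 2 (0b010);
    // likewise bucket 5 (0b101) has parent 1 (0b001).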

    size_type get_next_bucket_index( size_type bucket ) const {
        size_type bits = tbb::detail::log2(my_bucket_count.load(std::memory_order_relaxed));
        size_type reversed_next = reverse_n_bits(bucket, bits) + 1;
        return reverse_n_bits(reversed_next, bits);
    }
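    // Worked example (illustrative): with my_bucket_count == 8 (bits == 3) the
    // buckets are visited in bit-reversed order 0, 4, 2, 6, 1, 5, 3, 7.
    // E.g. for bucket 2 (0b010): reverse over 3 bits -> 0b010, add 1 -> 0b011,
    // reverse again -> 0b110 == 6.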

    std::atomic<size_type> my_size;
    std::atomic<size_type> my_bucket_count;
    float my_max_load_factor;
    hash_compare_type my_hash_compare;

    list_node_type my_head; // Head node of the split-ordered list
    unordered_segment_table my_segments; // Segment table of pointers to nodes

    template <typename Container, typename Value>
    friend class solist_iterator;

    template <typename OtherTraits>
    friend class concurrent_unordered_base;
}; // class concurrent_unordered_base

template <typename Traits>
bool operator==( const concurrent_unordered_base<Traits>& lhs,
                 const concurrent_unordered_base<Traits>& rhs ) {
    if (&lhs == &rhs) { return true; }
    if (lhs.size() != rhs.size()) { return false; }

#if _MSC_VER
    // Passing "unchecked" iterators to the 3-argument overload of
    // std::is_permutation causes compiler warnings.
    // The workaround is to use the 4-argument overload, which is
    // available since C++14 - the minimal standard version supported on MSVC
    return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
#else
    return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
#endif
}
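// Illustrative note: equality is order-insensitive, matching the standard
// unordered containers - two hypothetical sets built as {1, 2, 3} and
// {3, 1, 2} compare equal even if their iteration orders differ. Because
// std::is_permutation is used, the comparison is O(n^2) in the worst case.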

#if !__TBB_CPP20_COMPARISONS_PRESENT
template <typename Traits>
bool operator!=( const concurrent_unordered_base<Traits>& lhs,
                 const concurrent_unordered_base<Traits>& rhs ) {
    return !(lhs == rhs);
}
#endif

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(pop) // warning 4127 is back
#endif

} // namespace d1
} // namespace detail
} // namespace tbb

#endif // __TBB_detail__concurrent_unordered_base_H