0001 /*
0002     Copyright (c) 2005-2022 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_concurrent_hash_map_H
0018 #define __TBB_concurrent_hash_map_H
0019 
0020 #include "detail/_namespace_injection.h"
0021 #include "detail/_utils.h"
0022 #include "detail/_assert.h"
0023 #include "detail/_allocator_traits.h"
0024 #include "detail/_containers_helpers.h"
0025 #include "detail/_template_helpers.h"
0026 #include "detail/_hash_compare.h"
0027 #include "detail/_range_common.h"
0028 #include "tbb_allocator.h"
0029 #include "spin_rw_mutex.h"
0030 
0031 #include <atomic>
0032 #include <initializer_list>
0033 #include <tuple>
0034 #include <iterator>
0035 #include <utility>      // Need std::pair
0036 #include <cstring>      // Need std::memset
0037 
0038 namespace tbb {
0039 namespace detail {
0040 namespace d2 {
0041 
0042 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS && __TBB_CPP20_CONCEPTS_PRESENT
0043 template <typename Mutex>
0044 concept ch_map_rw_scoped_lockable = rw_scoped_lockable<Mutex> &&
0045     requires(const typename Mutex::scoped_lock& sl) {
0046         { sl.is_writer() } -> std::convertible_to<bool>;
0047 };
0048 #endif
0049 
0050 template <typename MutexType>
0051 struct hash_map_node_base : no_copy {
0052     using mutex_type = MutexType;
0053     // Scoped lock type for mutex
0054     using scoped_type = typename MutexType::scoped_lock;
0055     // Next node in chain
0056     hash_map_node_base* next;
0057     mutex_type mutex;
0058 };
0059 
0060 // Incompleteness flag value
0061 static void* const rehash_req_flag = reinterpret_cast<void*>(std::size_t(3));
0062 // Rehashed empty bucket flag
0063 static void* const empty_rehashed_flag = reinterpret_cast<void*>(std::size_t(0));
0064 
0065 template <typename MutexType>
0066 bool rehash_required( hash_map_node_base<MutexType>* node_ptr ) {
0067     return reinterpret_cast<void*>(node_ptr) == rehash_req_flag;
0068 }
0069 
0070 #if TBB_USE_ASSERT
0071 template <typename MutexType>
0072 bool empty_rehashed( hash_map_node_base<MutexType>* node_ptr ) {
0073     return reinterpret_cast<void*>(node_ptr) == empty_rehashed_flag;
0074 }
0075 #endif
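/* The node_list field of a bucket holds either a genuine node pointer or one of the
   small integer flags above: rehash_req_flag (3) marks a bucket whose elements still
   live in its parent bucket, while empty_rehashed_flag (0) marks an empty, already
   rehashed bucket.  A minimal illustrative sketch of how such tagged values can be
   told apart from real heap pointers (hypothetical helper; the header's own check is
   hash_map_base::is_valid() below):

       #include <cstdint>

       inline bool is_flag_value(const void* p) {
           // Nodes come from an allocator and never sit in the first 64 address
           // values, so anything <= 63 is treated as a flag rather than a pointer.
           return reinterpret_cast<std::uintptr_t>(p) <= std::uintptr_t(63);
       }
*/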
0076 
0077 // base class of concurrent_hash_map
0078 
0079 template <typename Allocator, typename MutexType>
0080 class hash_map_base {
0081 public:
0082     using size_type = std::size_t;
0083     using hashcode_type = std::size_t;
0084     using segment_index_type = std::size_t;
0085     using node_base = hash_map_node_base<MutexType>;
0086 
0087     struct bucket : no_copy {
0088         using mutex_type = MutexType;
0089         using scoped_type = typename mutex_type::scoped_lock;
0090 
0091         bucket() : node_list(nullptr) {}
0092         bucket( node_base* ptr ) : node_list(ptr) {}
0093 
0094         mutex_type mutex;
0095         std::atomic<node_base*> node_list;
0096     };
0097 
0098     using allocator_type = Allocator;
0099     using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
0100     using bucket_allocator_type = typename allocator_traits_type::template rebind_alloc<bucket>;
0101     using bucket_allocator_traits = tbb::detail::allocator_traits<bucket_allocator_type>;
0102 
0103     // Count of segments in the embedded (statically allocated) block
0104     static constexpr size_type embedded_block = 1;
0105     // Count of buckets in the embedded block
0106     static constexpr size_type embedded_buckets = 1 << embedded_block;
0107     // Count of segments in the first block, including embedded_block
0108     static constexpr size_type first_block = 8; // with 16-byte buckets the first-block allocation is about one 4096-byte page
0109     // Size of the segment pointer table: one segment per bit of segment_index_type
0110     static constexpr size_type pointers_per_table = sizeof(segment_index_type) * 8; // one segment per bit
0111 
0112     using segment_ptr_type = bucket*;
0113     using atomic_segment_type = std::atomic<segment_ptr_type>;
0114     using segments_table_type = atomic_segment_type[pointers_per_table];
0115 
0116     hash_map_base( const allocator_type& alloc ) : my_allocator(alloc), my_mask(embedded_buckets - 1), my_size(0) {
0117         for (size_type i = 0; i != embedded_buckets; ++i) {
0118             my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed);
0119         }
0120 
0121         for (size_type segment_index = 0; segment_index < pointers_per_table; ++segment_index) {
0122             auto argument = segment_index < embedded_block ? my_embedded_segment + segment_base(segment_index) : nullptr;
0123             my_table[segment_index].store(argument, std::memory_order_relaxed);
0124         }
0125 
0126         __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
0127     }
0128 
0129     // segment index of given index in the array
0130     static segment_index_type segment_index_of( size_type index ) {
0131         return segment_index_type(tbb::detail::log2( index|1 ));
0132     }
0133 
0134     // the first array index of given segment
0135     static segment_index_type segment_base( segment_index_type k ) {
0136         return (segment_index_type(1) << k & ~segment_index_type(1));
0137     }
0138 
0139     // segment size except for k == 0
0140     static size_type segment_size( segment_index_type k ) {
0141         return size_type(1) << k; // fake value for k==0
0142     }
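    /* Worked example of the segment arithmetic above (log2 is the integer floor of
       the base-2 logarithm, so these values follow directly from the definitions):

           bucket index i     : 0 1 | 2 3 | 4 5 6 7 | 8 ... 15 | ...
           segment_index_of(i): 0 0 | 1 1 | 2 2 2 2 | 3 ...  3 | ...
           segment_base(k)    : k=0 -> 0, k=1 -> 2, k=2 -> 4, k=3 -> 8
           segment_size(k)    : k=0 -> 1 (fake value; the embedded segment actually
                                holds buckets 0 and 1), otherwise 2^k

       So segment k (for k >= 1) covers bucket indices [2^k, 2^(k+1)), and enabling
       one more segment doubles the total number of buckets. */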
0143 
0144     // True if ptr is a valid node pointer rather than a flag value
0145     static bool is_valid( void* ptr ) {
0146         return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
0147     }
0148 
0149     template <typename... Args>
0150     void init_buckets_impl( segment_ptr_type ptr, size_type sz, const Args&... args ) {
0151         for (size_type i = 0; i < sz; ++i) {
0152             bucket_allocator_traits::construct(my_allocator, ptr + i, args...);
0153         }
0154     }
0155 
0156     // Initialize buckets
0157     void init_buckets( segment_ptr_type ptr, size_type sz, bool is_initial ) {
0158         if (is_initial) {
0159             init_buckets_impl(ptr, sz);
0160         } else {
0161             init_buckets_impl(ptr, sz, reinterpret_cast<node_base*>(rehash_req_flag));
0162         }
0163     }
0164 
0165     // Add node n to bucket b
0166     static void add_to_bucket( bucket* b, node_base* n ) {
0167         __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr);
0168         n->next = b->node_list.load(std::memory_order_relaxed);
0169         b->node_list.store(n, std::memory_order_relaxed); // it's under lock and the flag is set
0170     }
0171 
0172     const bucket_allocator_type& get_allocator() const {
0173         return my_allocator;
0174     }
0175 
0176     bucket_allocator_type& get_allocator() {
0177         return my_allocator;
0178     }
0179 
0180     // Enable segment
0181     void enable_segment( segment_index_type k, bool is_initial = false ) {
0182         __TBB_ASSERT( k, "Zero segment must be embedded" );
0183         size_type sz;
0184         __TBB_ASSERT( !is_valid(my_table[k].load(std::memory_order_relaxed)), "Wrong concurrent assignment");
0185         if (k >= first_block) {
0186             sz = segment_size(k);
0187             segment_ptr_type ptr = nullptr;
0188             try_call( [&] {
0189                 ptr = bucket_allocator_traits::allocate(my_allocator, sz);
0190             } ).on_exception( [&] {
0191                 my_table[k].store(nullptr, std::memory_order_relaxed);
0192             });
0193 
0194             __TBB_ASSERT(ptr, nullptr);
0195             init_buckets(ptr, sz, is_initial);
0196             my_table[k].store(ptr, std::memory_order_release);
0197             sz <<= 1;// double it to get entire capacity of the container
0198         } else { // the first block
0199             __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
0200             sz = segment_size(first_block);
0201             segment_ptr_type ptr = nullptr;
0202             try_call( [&] {
0203                 ptr = bucket_allocator_traits::allocate(my_allocator, sz - embedded_buckets);
0204             } ).on_exception( [&] {
0205                 my_table[k].store(nullptr, std::memory_order_relaxed);
0206             });
0207 
0208             __TBB_ASSERT(ptr, nullptr);
0209             init_buckets(ptr, sz - embedded_buckets, is_initial);
0210             ptr -= segment_base(embedded_block);
0211             for(segment_index_type i = embedded_block; i < first_block; i++) // calc the offsets
0212                 my_table[i].store(ptr + segment_base(i), std::memory_order_release);
0213         }
0214         my_mask.store(sz-1, std::memory_order_release);
0215     }
0216 
0217     void delete_segment( segment_index_type s ) {
0218         segment_ptr_type buckets_ptr = my_table[s].load(std::memory_order_relaxed);
0219         size_type sz = segment_size( s ? s : 1 );
0220 
0221         size_type deallocate_size = 0;
0222 
0223         if (s >= first_block) { // an individually allocated segment (first_block or later)
0224             deallocate_size = sz;
0225         } else if (s == embedded_block && embedded_block != first_block) {
0226             deallocate_size = segment_size(first_block) - embedded_buckets;
0227         }
0228 
0229         for (size_type i = 0; i < deallocate_size; ++i) {
0230             bucket_allocator_traits::destroy(my_allocator, buckets_ptr + i);
0231         }
0232         if (deallocate_size != 0) {
0233             bucket_allocator_traits::deallocate(my_allocator, buckets_ptr, deallocate_size);
0234         }
0235 
0236         if (s >= embedded_block) my_table[s].store(nullptr, std::memory_order_relaxed);
0237     }
0238 
0239     // Get bucket by (masked) hashcode
0240     bucket *get_bucket( hashcode_type h ) const noexcept {
0241         segment_index_type s = segment_index_of( h );
0242         h -= segment_base(s);
0243         segment_ptr_type seg = my_table[s].load(std::memory_order_acquire);
0244         __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
0245         return &seg[h];
0246     }
0247 
0248     // detail serial rehashing helper
0249     void mark_rehashed_levels( hashcode_type h ) noexcept {
0250         segment_index_type s = segment_index_of( h );
0251         while (segment_ptr_type seg = my_table[++s].load(std::memory_order_relaxed))
0252             if (rehash_required(seg[h].node_list.load(std::memory_order_relaxed))) {
0253                 seg[h].node_list.store(reinterpret_cast<node_base*>(empty_rehashed_flag), std::memory_order_relaxed);
0254                 mark_rehashed_levels( h + ((hashcode_type)1<<s) ); // optimized segment_base(s)
0255             }
0256     }
0257 
0258     // Check for mask race
0259     // Splitting into two functions should help inlining
0260     inline bool check_mask_race( const hashcode_type h, hashcode_type &m ) const {
0261         hashcode_type m_now, m_old = m;
0262         m_now = my_mask.load(std::memory_order_acquire);
0263         if (m_old != m_now) {
0264             return check_rehashing_collision(h, m_old, m = m_now);
0265         }
0266         return false;
0267     }
0268 
0269     // Process mask race, check for rehashing collision
0270     bool check_rehashing_collision( const hashcode_type h, hashcode_type m_old, hashcode_type m ) const {
0271         __TBB_ASSERT(m_old != m, nullptr); // TODO?: m arg could be optimized out by passing h = h&m
0272         if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
0273             // condition above proves that 'h' has some other bits set beside 'm_old'
0274             // find next applicable mask after m_old    //TODO: look at bsl instruction
0275             for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
0276                 ;
0277             m_old = (m_old<<1) - 1; // get full mask from a bit
0278             __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, nullptr);
0279             // check whether it is rehashing/ed
0280             if (!rehash_required(get_bucket(h & m_old)->node_list.load(std::memory_order_acquire))) {
0281                 return true;
0282             }
0283         }
0284         return false;
0285     }
0286 
0287     // Insert a node and check for load factor. @return segment index to enable.
0288     segment_index_type insert_new_node( bucket *b, node_base *n, hashcode_type mask ) {
0289         size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
0290         add_to_bucket( b, n );
0291         // check load factor
0292         if( sz >= mask ) { // TODO: add custom load_factor
0293             segment_index_type new_seg = tbb::detail::log2( mask+1 ); //optimized segment_index_of
0294             __TBB_ASSERT( is_valid(my_table[new_seg-1].load(std::memory_order_relaxed)), "new allocations must not publish the new mask until the segment has been allocated");
0295             static const segment_ptr_type is_allocating = segment_ptr_type(2);
0296             segment_ptr_type disabled = nullptr;
0297             if (!(my_table[new_seg].load(std::memory_order_acquire))
0298                 && my_table[new_seg].compare_exchange_strong(disabled, is_allocating))
0299                 return new_seg; // The value must be processed
0300         }
0301         return 0;
0302     }
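    /* Growth sketch (illustrative numbers): insert_new_node() bumps my_size and, once
       the element count reaches the current mask, asks the caller to enable segment
       log2(mask + 1), which doubles the bucket count.  With mask == 255, the insertion
       that brings my_size to 255 returns new_seg == log2(256) == 8; enabling segment 8
       adds buckets [256, 512) and my_mask becomes 511.  The compare_exchange_strong on
       my_table[new_seg] ensures that only one thread wins the right to allocate the
       new segment. */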
0303 
0304     // Prepare enough segments for number of buckets
0305     void reserve(size_type buckets) {
0306         if( !buckets-- ) return;
0307         bool is_initial = !my_size.load(std::memory_order_relaxed);
0308         for (size_type m = my_mask.load(std::memory_order_relaxed); buckets > m;
0309             m = my_mask.load(std::memory_order_relaxed))
0310         {
0311             enable_segment( segment_index_of( m+1 ), is_initial );
0312         }
0313     }
0314 
0315     // Swap hash_map_bases
0316     void internal_swap_content(hash_map_base &table) {
0317         using std::swap;
0318         swap_atomics_relaxed(my_mask, table.my_mask);
0319         swap_atomics_relaxed(my_size, table.my_size);
0320 
0321         for(size_type i = 0; i < embedded_buckets; i++) {
0322             auto temp = my_embedded_segment[i].node_list.load(std::memory_order_relaxed);
0323             my_embedded_segment[i].node_list.store(table.my_embedded_segment[i].node_list.load(std::memory_order_relaxed),
0324                 std::memory_order_relaxed);
0325             table.my_embedded_segment[i].node_list.store(temp, std::memory_order_relaxed);
0326         }
0327         for(size_type i = embedded_block; i < pointers_per_table; i++) {
0328             auto temp = my_table[i].load(std::memory_order_relaxed);
0329             my_table[i].store(table.my_table[i].load(std::memory_order_relaxed),
0330                 std::memory_order_relaxed);
0331             table.my_table[i].store(temp, std::memory_order_relaxed);
0332         }
0333     }
0334 
0335     void internal_move(hash_map_base&& other) {
0336         my_mask.store(other.my_mask.load(std::memory_order_relaxed), std::memory_order_relaxed);
0337         other.my_mask.store(embedded_buckets - 1, std::memory_order_relaxed);
0338 
0339         my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
0340         other.my_size.store(0, std::memory_order_relaxed);
0341 
0342         for (size_type i = 0; i < embedded_buckets; ++i) {
0343             my_embedded_segment[i].node_list.store(other.my_embedded_segment[i].node_list, std::memory_order_relaxed);
0344             other.my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed);
0345         }
0346 
0347         for (size_type i = embedded_block; i < pointers_per_table; ++i) {
0348             my_table[i].store(other.my_table[i].load(std::memory_order_relaxed),
0349                 std::memory_order_relaxed);
0350             other.my_table[i].store(nullptr, std::memory_order_relaxed);
0351         }
0352     }
0353 
0354 protected:
0355     bucket_allocator_type my_allocator;
0356     // Hash mask = sum of allocated segment sizes - 1
0357     std::atomic<hashcode_type> my_mask;
0358     // Size of container in stored items
0359     std::atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
0360     // Zero segment
0361     bucket my_embedded_segment[embedded_buckets];
0362     // Segment pointers table. Also prevents false sharing between my_mask and my_size
0363     segments_table_type my_table;
0364 };
0365 
0366 template <typename Iterator>
0367 class hash_map_range;
0368 
0369 // Meets requirements of a forward iterator for STL
0370 // Value is either the T or const T type of the container.
0371 template <typename Container, typename Value>
0372 class hash_map_iterator {
0373     using map_type = Container;
0374     using node = typename Container::node;
0375     using map_base = typename Container::base_type;
0376     using node_base = typename map_base::node_base;
0377     using bucket = typename map_base::bucket;
0378 public:
0379     using value_type = Value;
0380     using size_type = typename Container::size_type;
0381     using difference_type = typename Container::difference_type;
0382     using pointer = value_type*;
0383     using reference = value_type&;
0384     using iterator_category = std::forward_iterator_tag;
0385 
0386     // Construct undefined iterator
0387     hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
0388     hash_map_iterator( const hash_map_iterator<Container, typename Container::value_type>& other ) :
0389         my_map(other.my_map),
0390         my_index(other.my_index),
0391         my_bucket(other.my_bucket),
0392         my_node(other.my_node)
0393     {}
0394 
0395     hash_map_iterator& operator=( const hash_map_iterator<Container, typename Container::value_type>& other ) {
0396         my_map = other.my_map;
0397         my_index = other.my_index;
0398         my_bucket = other.my_bucket;
0399         my_node = other.my_node;
0400         return *this;
0401     }
0402 
0403     Value& operator*() const {
0404         __TBB_ASSERT( map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
0405         return my_node->value();
0406     }
0407 
0408     Value* operator->() const {return &operator*();}
0409 
0410     hash_map_iterator& operator++() {
0411         my_node = static_cast<node*>( my_node->next );
0412         if( !my_node ) advance_to_next_bucket();
0413         return *this;
0414     }
0415 
0416     // Post increment
0417     hash_map_iterator operator++(int) {
0418         hash_map_iterator old(*this);
0419         operator++();
0420         return old;
0421     }
0422 private:
0423     template <typename C, typename T, typename U>
0424     friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
0425 
0426     template <typename C, typename T, typename U>
0427     friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
0428 
0429     template <typename C, typename T, typename U>
0430     friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
0431 
0432     template <typename C, typename U>
0433     friend class hash_map_iterator;
0434 
0435     template <typename I>
0436     friend class hash_map_range;
0437 
0438     void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
0439         size_t k = my_index+1;
0440         __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
0441         while (k <= my_map->my_mask.load(std::memory_order_relaxed)) {
0442             // Following test uses 2's-complement wizardry
0443             if( k&(k-2) ) // not the beginning of a segment
0444                 ++my_bucket;
0445             else my_bucket = my_map->get_bucket( k );
0446             node_base *n = my_bucket->node_list.load(std::memory_order_relaxed);
0447             if( map_base::is_valid(n) ) {
0448                 my_node = static_cast<node*>(n);
0449                 my_index = k;
0450                 return;
0451             }
0452             ++k;
0453         }
0454         my_bucket = nullptr; my_node = nullptr; my_index = k; // the end
0455     }
0456 
0457     template <typename Key, typename T, typename HashCompare, typename A
0458 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
0459             , typename M
0460              >
0461         __TBB_requires(tbb::detail::hash_compare<HashCompare, Key> &&
0462                        ch_map_rw_scoped_lockable<M>)
0463 #else
0464              >
0465         __TBB_requires(tbb::detail::hash_compare<HashCompare, Key>)
0466 #endif
0467     friend class concurrent_hash_map;
0468 
0469     hash_map_iterator( const Container &map, std::size_t index, const bucket *b, node_base *n ) :
0470         my_map(&map), my_index(index), my_bucket(b), my_node(static_cast<node*>(n))
0471     {
0472         if( b && !map_base::is_valid(n) )
0473             advance_to_next_bucket();
0474     }
0475 
0476     // concurrent_hash_map over which we are iterating.
0477     const Container *my_map;
0478     // Index in hash table for current item
0479     size_t my_index;
0480     // Pointer to bucket
0481     const bucket* my_bucket;
0482     // Pointer to node that has current item
0483     node* my_node;
0484 };
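/* Traversal sketch (illustrative; assumes <iostream> and <string> are available):
   hash_map_iterator walks each bucket's node chain and then advances to the next
   non-empty bucket, so a whole-map scan looks like ordinary STL iteration.  Note
   that iteration is not thread-safe and must not run concurrently with inserts or
   erases:

       void dump(const tbb::concurrent_hash_map<int, std::string>& table) {
           for (auto it = table.begin(); it != table.end(); ++it) {
               std::cout << it->first << " -> " << it->second << '\n';
           }
       }
*/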
0485 
0486 template <typename Container, typename T, typename U>
0487 bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
0488     return i.my_node == j.my_node && i.my_map == j.my_map;
0489 }
0490 
0491 template <typename Container, typename T, typename U>
0492 bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
0493     return i.my_node != j.my_node || i.my_map != j.my_map;
0494 }
0495 
0496 // Range class used with concurrent_hash_map
0497 template <typename Iterator>
0498 class hash_map_range {
0499     using map_type = typename Iterator::map_type;
0500 public:
0501     // Type for size of a range
0502     using size_type = std::size_t;
0503     using value_type = typename Iterator::value_type;
0504     using reference = typename Iterator::reference;
0505     using difference_type = typename Iterator::difference_type;
0506     using iterator = Iterator;
0507 
0508     // True if range is empty.
0509     bool empty() const { return my_begin == my_end; }
0510 
0511     // True if range can be partitioned into two subranges.
0512     bool is_divisible() const {
0513         return my_midpoint != my_end;
0514     }
0515 
0516     // Split range.
0517     hash_map_range( hash_map_range& r, split ) :
0518         my_end(r.my_end),
0519         my_grainsize(r.my_grainsize)
0520     {
0521         r.my_end = my_begin = r.my_midpoint;
0522         __TBB_ASSERT( !empty(), "Splitting a range that is not divisible" );
0523         __TBB_ASSERT( !r.empty(), "Splitting a range that is not divisible" );
0524         set_midpoint();
0525         r.set_midpoint();
0526     }
0527 
0528     // Init range with container and grainsize specified
0529     hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
0530         my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list.load(std::memory_order_relaxed) ) ),
0531         my_end( Iterator( map, map.my_mask.load(std::memory_order_relaxed) + 1, nullptr, nullptr ) ),
0532         my_grainsize( grainsize_ )
0533     {
0534         __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
0535         set_midpoint();
0536     }
0537 
0538     Iterator begin() const { return my_begin; }
0539     Iterator end() const { return my_end; }
0540     // The grain size for this range.
0541     size_type grainsize() const { return my_grainsize; }
0542 
0543 private:
0544     Iterator my_begin;
0545     Iterator my_end;
0546     mutable Iterator my_midpoint;
0547     size_t my_grainsize;
0548     // Set my_midpoint to point approximately half way between my_begin and my_end.
0549     void set_midpoint() const;
0550     template <typename U> friend class hash_map_range;
0551 };
0552 
0553 template <typename Iterator>
0554 void hash_map_range<Iterator>::set_midpoint() const {
0555     // Split by groups of nodes
0556     size_t m = my_end.my_index-my_begin.my_index;
0557     if( m > my_grainsize ) {
0558         m = my_begin.my_index + m/2u;
0559         auto b = my_begin.my_map->get_bucket(m);
0560         my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list.load(std::memory_order_relaxed));
0561     } else {
0562         my_midpoint = my_end;
0563     }
0564     __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
0565         "my_begin is after my_midpoint" );
0566     __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
0567         "my_midpoint is after my_end" );
0568     __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
0569         "[my_begin, my_midpoint) range should not be empty" );
0570 }
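/* Range usage sketch (illustrative; assumes oneapi/tbb/parallel_for.h, <atomic> and
   <cstddef> are also included): hash_map_range splits at bucket-index midpoints, so a
   concurrent_hash_map can feed a parallel loop directly:

       template <typename Map>
       std::size_t count_items(const Map& m) {
           std::atomic<std::size_t> n{0};
           tbb::parallel_for(m.range(), [&n](const typename Map::const_range_type& r) {
               for (auto it = r.begin(); it != r.end(); ++it) {
                   ++n;
               }
           });
           return n.load();
       }
*/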
0571 
0572 template <typename Key, typename T,
0573           typename HashCompare = d1::tbb_hash_compare<Key>,
0574           typename Allocator = tbb_allocator<std::pair<const Key, T>>
0575 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
0576         , typename MutexType = spin_rw_mutex
0577          >
0578     __TBB_requires(tbb::detail::hash_compare<HashCompare, Key> &&
0579                    ch_map_rw_scoped_lockable<MutexType>)
0580 #else
0581          >
0582     __TBB_requires(tbb::detail::hash_compare<HashCompare, Key>)
0583 #endif
0584 class concurrent_hash_map
0585 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
0586     : protected hash_map_base<Allocator, MutexType>
0587 #else
0588     : protected hash_map_base<Allocator, spin_rw_mutex>
0589 #endif
0590 {
0591     template <typename Container, typename Value>
0592     friend class hash_map_iterator;
0593 
0594     template <typename I>
0595     friend class hash_map_range;
0596     using allocator_traits_type = tbb::detail::allocator_traits<Allocator>;
0597 
0598 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
0599     using base_type = hash_map_base<Allocator, MutexType>;
0600 #else
0601     using base_type = hash_map_base<Allocator, spin_rw_mutex>;
0602 #endif
0603 public:
0604     using key_type = Key;
0605     using mapped_type = T;
0606     // type_identity is needed to disable implicit deduction guides for std::initializer_list constructors
0607     // and copy/move constructor with explicit allocator argument
0608     using allocator_type = tbb::detail::type_identity_t<Allocator>;
0609     using hash_compare_type = tbb::detail::type_identity_t<HashCompare>;
0610     using value_type = std::pair<const Key, T>;
0611     using size_type = typename base_type::size_type;
0612     using difference_type = std::ptrdiff_t;
0613 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
0614     using mutex_type = MutexType;
0615 #endif
0616     using pointer = typename allocator_traits_type::pointer;
0617     using const_pointer = typename allocator_traits_type::const_pointer;
0618 
0619     using reference = value_type&;
0620     using const_reference = const value_type&;
0621     using iterator = hash_map_iterator<concurrent_hash_map, value_type>;
0622     using const_iterator = hash_map_iterator<concurrent_hash_map, const value_type>;
0623     using range_type = hash_map_range<iterator>;
0624     using const_range_type = hash_map_range<const_iterator>;
0625 
0626 protected:
0627     static_assert(std::is_same<value_type, typename Allocator::value_type>::value,
0628         "value_type of the container must be the same as its allocator's");
0629 
0630     friend class const_accessor;
0631     class node;
0632     using segment_index_type = typename base_type::segment_index_type;
0633     using segment_ptr_type = typename base_type::segment_ptr_type;
0634     using node_base = typename base_type::node_base;
0635     using bucket = typename base_type::bucket;
0636     using hashcode_type = typename base_type::hashcode_type;
0637     using bucket_allocator_type = typename base_type::bucket_allocator_type;
0638     using node_allocator_type = typename base_type::allocator_traits_type::template rebind_alloc<node>;
0639     using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>;
0640     hash_compare_type my_hash_compare;
0641 
0642     class node : public node_base {
0643     public:
0644         node() {}
0645         ~node() {}
0646         pointer storage() { return &my_value; }
0647         value_type& value() { return *storage(); }
0648     private:
0649         union {
0650             value_type my_value;
0651         };
0652     };
0653 
0654     void delete_node( node_base *n ) {
0655         node_allocator_type node_allocator(this->get_allocator());
0656         node_allocator_traits::destroy(node_allocator, static_cast<node*>(n)->storage());
0657         node_allocator_traits::destroy(node_allocator, static_cast<node*>(n));
0658         node_allocator_traits::deallocate(node_allocator, static_cast<node*>(n), 1);
0659     }
0660 
0661     template <typename... Args>
0662     static node* create_node(bucket_allocator_type& allocator, Args&&... args) {
0663         node_allocator_type node_allocator(allocator);
0664         node* node_ptr = node_allocator_traits::allocate(node_allocator, 1);
0665         auto guard = make_raii_guard([&] {
0666             node_allocator_traits::destroy(node_allocator, node_ptr);
0667             node_allocator_traits::deallocate(node_allocator, node_ptr, 1);
0668         });
0669 
0670         node_allocator_traits::construct(node_allocator, node_ptr);
0671         node_allocator_traits::construct(node_allocator, node_ptr->storage(), std::forward<Args>(args)...);
0672         guard.dismiss();
0673         return node_ptr;
0674     }
0675 
0676     static node* allocate_node_copy_construct(bucket_allocator_type& allocator, const Key &key, const T * t){
0677         return create_node(allocator, key, *t);
0678     }
0679 
0680     static node* allocate_node_move_construct(bucket_allocator_type& allocator, const Key &key, const T * t){
0681         return create_node(allocator, key, std::move(*const_cast<T*>(t)));
0682     }
0683 
0684     template <typename K = Key>
0685     static node* allocate_node_default_construct(bucket_allocator_type& allocator, const K &key, const T * ){
0686         // Emplace-construct a value-initialized T object inside the pair
0687         return create_node(allocator, std::piecewise_construct,
0688                            std::forward_as_tuple(key), std::forward_as_tuple());
0689     }
0690 
0691     static node* do_not_allocate_node(bucket_allocator_type& , const Key &, const T * ){
0692         __TBB_ASSERT(false,"this dummy function should not be called");
0693         return nullptr;
0694     }
0695 
0696     template <typename K>
0697     node *search_bucket( const K &key, bucket *b ) const {
0698         node *n = static_cast<node*>( b->node_list.load(std::memory_order_relaxed) );
0699         while (this->is_valid(n) && !my_hash_compare.equal(key, n->value().first))
0700             n = static_cast<node*>( n->next );
0701         __TBB_ASSERT(!rehash_required(n), "Search can be executed only for rehashed bucket");
0702         return n;
0703     }
0704 
0705     // bucket_accessor finds a bucket by hashcode, rehashes it if needed, acquires its lock, and provides access to it
0706     class bucket_accessor : public bucket::scoped_type {
0707         bucket *my_b;
0708     public:
0709         bucket_accessor( concurrent_hash_map *base, const hashcode_type h, bool writer = false ) { acquire( base, h, writer ); }
0710         // find a bucket by masked hashcode, optionally rehash, and acquire the lock
0711         inline void acquire( concurrent_hash_map *base, const hashcode_type h, bool writer = false ) {
0712             my_b = base->get_bucket( h );
0713             // TODO: actually, notification is unnecessary here, just hiding double-check
0714             if (rehash_required(my_b->node_list.load(std::memory_order_acquire))
0715                 && bucket::scoped_type::try_acquire( my_b->mutex, /*write=*/true ) )
0716             {
0717                 if (rehash_required(my_b->node_list.load(std::memory_order_relaxed))) base->rehash_bucket(my_b, h); // recursive rehashing
0718             }
0719             else bucket::scoped_type::acquire( my_b->mutex, writer );
0720             __TBB_ASSERT(!rehash_required(my_b->node_list.load(std::memory_order_relaxed)), nullptr);
0721         }
0722 
0723         // get bucket pointer
0724         bucket *operator() () { return my_b; }
0725     };
0726 
0727     // TODO refactor to hash_base
0728     void rehash_bucket( bucket *b_new, const hashcode_type hash ) {
0729         __TBB_ASSERT( hash > 1, "The lowermost buckets can't be rehashed" );
0730         b_new->node_list.store(reinterpret_cast<node_base*>(empty_rehashed_flag), std::memory_order_release); // mark rehashed
0731         hashcode_type mask = (hashcode_type(1) << tbb::detail::log2(hash)) - 1; // get parent mask from the topmost bit
0732         bucket_accessor b_old( this, hash & mask );
0733 
0734         mask = (mask<<1) | 1; // get full mask for new bucket
0735         __TBB_ASSERT( (mask&(mask+1))==0 && (hash & mask) == hash, nullptr );
0736     restart:
0737         node_base* prev = nullptr;
0738         node_base* curr = b_old()->node_list.load(std::memory_order_acquire);
0739         while (this->is_valid(curr)) {
0740             hashcode_type curr_node_hash = my_hash_compare.hash(static_cast<node*>(curr)->value().first);
0741 
0742             if ((curr_node_hash & mask) == hash) {
0743                 if (!b_old.is_writer()) {
0744                     if (!b_old.upgrade_to_writer()) {
0745                         goto restart; // node ptr can be invalid due to concurrent erase
0746                     }
0747                 }
0748                 node_base* next = curr->next;
0749                 // exclude from b_old
0750                 if (prev == nullptr) {
0751                     b_old()->node_list.store(curr->next, std::memory_order_relaxed);
0752                 } else {
0753                     prev->next = curr->next;
0754                 }
0755                 this->add_to_bucket(b_new, curr);
0756                 curr = next;
0757             } else {
0758                 prev = curr;
0759                 curr = curr->next;
0760             }
0761         }
0762     }
0763 
0764     template <typename U>
0765     using hash_compare_is_transparent = dependent_bool<comp_is_transparent<hash_compare_type>, U>;
0766 
0767 public:
0768 
0769     class accessor;
0770     // Combines data access, locking, and garbage collection.
0771     class const_accessor : private node::scoped_type /*which derived from no_copy*/ {
0772 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
0773         friend class concurrent_hash_map<Key,T,HashCompare,Allocator,MutexType>;
0774 #else
0775         friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
0776 #endif
0777         friend class accessor;
0778     public:
0779         // Type of value
0780         using value_type = const typename concurrent_hash_map::value_type;
0781 
0782         // True if result is empty.
0783         bool empty() const { return !my_node; }
0784 
0785         // Set to null
0786         void release() {
0787             if( my_node ) {
0788                 node::scoped_type::release();
0789                 my_node = nullptr;
0790             }
0791         }
0792 
0793         // Return reference to associated value in hash table.
0794         const_reference operator*() const {
0795             __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
0796             return my_node->value();
0797         }
0798 
0799         // Return pointer to associated value in hash table.
0800         const_pointer operator->() const {
0801             return &operator*();
0802         }
0803 
0804         // Create empty result
0805         const_accessor() : my_node(nullptr), my_hash() {}
0806 
0807         // Destroy result after releasing the underlying reference.
0808         ~const_accessor() {
0809             my_node = nullptr; // scoped lock's release() is called in its destructor
0810         }
0811     protected:
0812         bool is_writer() { return node::scoped_type::is_writer(); }
0813         node *my_node;
0814         hashcode_type my_hash;
0815     };
0816 
0817     // Allows write access to elements and combines data access, locking, and garbage collection.
0818     class accessor: public const_accessor {
0819     public:
0820         // Type of value
0821         using value_type = typename concurrent_hash_map::value_type;
0822 
0823         // Return reference to associated value in hash table.
0824         reference operator*() const {
0825             __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
0826             return this->my_node->value();
0827         }
0828 
0829         // Return pointer to associated value in hash table.
0830         pointer operator->() const {
0831             return &operator*();
0832         }
0833     };
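    /* Accessor usage sketch (illustrative; the map instantiation below is hypothetical):
       an accessor holds a lock on the element for as long as it is alive, so a
       read-modify-write on the mapped value needs no extra synchronization.

           using counter_map = tbb::concurrent_hash_map<std::string, int>;

           void bump(counter_map& table, const std::string& key) {
               counter_map::accessor a;
               table.insert(a, key);   // inserts {key, 0} if absent and write-locks the element
               ++a->second;            // safe while 'a' holds the lock
           }                           // lock released when 'a' is destroyed
    */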
0834 
0835     explicit concurrent_hash_map( const hash_compare_type& compare, const allocator_type& a = allocator_type() )
0836         : base_type(a)
0837         , my_hash_compare(compare)
0838     {}
0839 
0840     concurrent_hash_map() : concurrent_hash_map(hash_compare_type()) {}
0841 
0842     explicit concurrent_hash_map( const allocator_type& a )
0843         : concurrent_hash_map(hash_compare_type(), a)
0844     {}
0845 
0846     // Construct empty table with n preallocated buckets. This number also serves as the initial concurrency level.
0847     concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
0848         : concurrent_hash_map(a)
0849     {
0850         this->reserve(n);
0851     }
0852 
0853     concurrent_hash_map( size_type n, const hash_compare_type& compare, const allocator_type& a = allocator_type() )
0854         : concurrent_hash_map(compare, a)
0855     {
0856         this->reserve(n);
0857     }
0858 
0859     // Copy constructor
0860     concurrent_hash_map( const concurrent_hash_map &table )
0861         : concurrent_hash_map(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
0862     {
0863         try_call( [&] {
0864             internal_copy(table);
0865         }).on_exception( [&] {
0866             this->clear();
0867         });
0868     }
0869 
0870     concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
0871         : concurrent_hash_map(a)
0872     {
0873         try_call( [&] {
0874             internal_copy(table);
0875         }).on_exception( [&] {
0876             this->clear();
0877         });
0878     }
0879 
0880     // Move constructor
0881     concurrent_hash_map( concurrent_hash_map &&table )
0882         : concurrent_hash_map(std::move(table.get_allocator()))
0883     {
0884         this->internal_move(std::move(table));
0885     }
0886 
0887     // Move constructor
0888     concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
0889         : concurrent_hash_map(a)
0890     {
0891         using is_equal_type = typename node_allocator_traits::is_always_equal;
0892         internal_move_construct_with_allocator(std::move(table), a, is_equal_type());
0893     }
0894 
0895     // Construct by copying the iteration range [first, last), using the given allocator instance
0896     template <typename I>
0897     concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
0898         : concurrent_hash_map(a)
0899     {
0900         try_call( [&] {
0901             internal_copy(first, last, std::distance(first, last));
0902         }).on_exception( [&] {
0903             this->clear();
0904         });
0905     }
0906 
0907     template <typename I>
0908     concurrent_hash_map( I first, I last, const hash_compare_type& compare, const allocator_type& a = allocator_type() )
0909         : concurrent_hash_map(compare, a)
0910     {
0911         try_call( [&] {
0912             internal_copy(first, last, std::distance(first, last));
0913         }).on_exception( [&] {
0914             this->clear();
0915         });
0916     }
0917 
0918     concurrent_hash_map( std::initializer_list<value_type> il, const hash_compare_type& compare = hash_compare_type(), const allocator_type& a = allocator_type() )
0919         : concurrent_hash_map(compare, a)
0920     {
0921         try_call( [&] {
0922             internal_copy(il.begin(), il.end(), il.size());
0923         }).on_exception( [&] {
0924             this->clear();
0925         });
0926     }
0927 
0928     concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type& a )
0929         : concurrent_hash_map(il, hash_compare_type(), a) {}
0930 
0931     // Assignment
0932     concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
0933         if( this != &table ) {
0934             clear();
0935             copy_assign_allocators(this->my_allocator, table.my_allocator);
0936             internal_copy(table);
0937         }
0938         return *this;
0939     }
0940 
0941     // Move Assignment
0942     concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
0943         if( this != &table ) {
0944             using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment;
0945             using is_equal_type = typename node_allocator_traits::is_always_equal;
0946             move_assign_allocators(this->my_allocator, table.my_allocator);
0947             internal_move_assign(std::move(table), tbb::detail::disjunction<is_equal_type, pocma_type>());
0948         }
0949         return *this;
0950     }
0951 
0952     // Assignment
0953     concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
0954         clear();
0955         internal_copy(il.begin(), il.end(), il.size());
0956         return *this;
0957     }
0958 
0959     // Rehashes and optionally resizes the whole table.
0960     /** Useful to optimize performance before or after concurrent operations.
0961         Also enables the use of the concurrent find() and count() methods in a serial context. */
0962     void rehash(size_type sz = 0) {
0963         this->reserve(sz); // TODO: add reduction of number of buckets as well
0964         hashcode_type mask = this->my_mask.load(std::memory_order_relaxed);
0965         hashcode_type b = (mask+1)>>1; // size or first index of the last segment
0966         __TBB_ASSERT((b&(b-1))==0, nullptr); // zero or power of 2
0967         bucket *bp = this->get_bucket( b ); // only the last segment should be scanned for rehashing
0968         for(; b <= mask; b++, bp++ ) {
0969             node_base *n = bp->node_list.load(std::memory_order_relaxed);
0970             __TBB_ASSERT( this->is_valid(n) || empty_rehashed(n) || rehash_required(n), "Broken internal structure" );
0971             __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
0972             if (rehash_required(n)) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
0973                 hashcode_type h = b; bucket *b_old = bp;
0974                 do {
0975                     __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
0976                     hashcode_type m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit
0977                     b_old = this->get_bucket( h &= m );
0978                 } while( rehash_required(b_old->node_list.load(std::memory_order_relaxed)) );
0979                 // now h - is index of the root rehashed bucket b_old
0980                 this->mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
0981                 node_base* prev = nullptr;
0982                 node_base* curr = b_old->node_list.load(std::memory_order_relaxed);
0983                 while (this->is_valid(curr)) {
0984                     hashcode_type curr_node_hash = my_hash_compare.hash(static_cast<node*>(curr)->value().first);
0985 
0986                     if ((curr_node_hash & mask) != h) { // should be rehashed
0987                         node_base* next = curr->next;
0988                         // exclude from b_old
0989                         if (prev == nullptr) {
0990                             b_old->node_list.store(curr->next, std::memory_order_relaxed);
0991                         } else {
0992                             prev->next = curr->next;
0993                         }
0994                         bucket *b_new = this->get_bucket(curr_node_hash & mask);
0995                         __TBB_ASSERT(!rehash_required(b_new->node_list.load(std::memory_order_relaxed)), "hash() function changed for key in table or internal error");
0996                         this->add_to_bucket(b_new, curr);
0997                         curr = next;
0998                     } else {
0999                         prev = curr;
1000                         curr = curr->next;
1001                     }
1002                 }
1003             }
1004         }
1005     }
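    /* Sizing sketch (illustrative values): buckets are rehashed lazily on first access,
       and rehash() finishes that deferred work serially.  Pre-sizing the table avoids
       repeated segment growth during a bulk load:

           tbb::concurrent_hash_map<int, int> m(1 << 16); // preallocate 65536 buckets
           // ... parallel inserts ...
           m.rehash();  // serial: rehash any buckets still marked as requiring it
    */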
1006 
1007     // Clear table
1008     void clear() {
1009         hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
1010         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1011         this->my_size.store(0, std::memory_order_relaxed);
1012         segment_index_type s = this->segment_index_of( m );
1013         __TBB_ASSERT( s+1 == this->pointers_per_table || !this->my_table[s+1].load(std::memory_order_relaxed), "wrong mask or concurrent grow" );
1014         do {
1015             __TBB_ASSERT(this->is_valid(this->my_table[s].load(std::memory_order_relaxed)), "wrong mask or concurrent grow" );
1016             segment_ptr_type buckets_ptr = this->my_table[s].load(std::memory_order_relaxed);
1017             size_type sz = this->segment_size( s ? s : 1 );
1018             for( segment_index_type i = 0; i < sz; i++ )
1019                 for( node_base *n = buckets_ptr[i].node_list.load(std::memory_order_relaxed);
1020                     this->is_valid(n); n = buckets_ptr[i].node_list.load(std::memory_order_relaxed) )
1021                 {
1022                     buckets_ptr[i].node_list.store(n->next, std::memory_order_relaxed);
1023                     delete_node( n );
1024                 }
1025             this->delete_segment(s);
1026         } while(s-- > 0);
1027         this->my_mask.store(this->embedded_buckets - 1, std::memory_order_relaxed);
1028     }
1029 
1030     // Clear table and destroy it.
1031     ~concurrent_hash_map() { clear(); }
1032 
1033     //------------------------------------------------------------------------
1034     // Parallel algorithm support
1035     //------------------------------------------------------------------------
1036     range_type range( size_type grainsize=1 ) {
1037         return range_type( *this, grainsize );
1038     }
1039     const_range_type range( size_type grainsize=1 ) const {
1040         return const_range_type( *this, grainsize );
1041     }
1042 
1043     //------------------------------------------------------------------------
1044     // STL support - not thread-safe methods
1045     //------------------------------------------------------------------------
1046     iterator begin() { return iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
1047     const_iterator begin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
1048     const_iterator cbegin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
1049     iterator end() { return iterator( *this, 0, nullptr, nullptr ); }
1050     const_iterator end() const { return const_iterator( *this, 0, nullptr, nullptr ); }
1051     const_iterator cend() const { return const_iterator( *this, 0, nullptr, nullptr ); }
1052     std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
1053     std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
1054 
1055     template <typename K>
1056     typename std::enable_if<hash_compare_is_transparent<K>::value,
1057                             std::pair<iterator, iterator>>::type equal_range( const K& key ) {
1058         return internal_equal_range(key, end());
1059     }
1060 
1061     template <typename K>
1062     typename std::enable_if<hash_compare_is_transparent<K>::value,
1063                             std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const {
1064         return internal_equal_range(key, end());
1065     }
1066 
1067     // Number of items in table.
1068     size_type size() const { return this->my_size.load(std::memory_order_acquire); }
1069 
1070     // True if size()==0.
1071     __TBB_nodiscard bool empty() const { return size() == 0; }
1072 
1073     // Upper bound on size.
1074     size_type max_size() const {
1075         return allocator_traits_type::max_size(base_type::get_allocator());
1076     }
1077 
1078     // Returns the current number of buckets
1079     size_type bucket_count() const { return this->my_mask.load(std::memory_order_relaxed) + 1; }
1080 
1081     // return allocator object
1082     allocator_type get_allocator() const { return base_type::get_allocator(); }
1083 
1084     // swap two instances. Iterators are invalidated
1085     void swap(concurrent_hash_map& table) {
1086         using pocs_type = typename node_allocator_traits::propagate_on_container_swap;
1087         using is_equal_type = typename node_allocator_traits::is_always_equal;
1088         swap_allocators(this->my_allocator, table.my_allocator);
1089         internal_swap(table, tbb::detail::disjunction<pocs_type, is_equal_type>());
1090     }
1091 
1092     //------------------------------------------------------------------------
1093     // concurrent map operations
1094     //------------------------------------------------------------------------
1095 
1096     // Return count of items (0 or 1)
1097     size_type count( const Key &key ) const {
1098         return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node);
1099     }
1100 
1101     template <typename K>
1102     typename std::enable_if<hash_compare_is_transparent<K>::value,
1103                             size_type>::type count( const K& key ) const {
1104         return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node);
1105     }
1106 
1107     // Find item and acquire a read lock on the item.
1108     /** Return true if item is found, false otherwise. */
1109     bool find( const_accessor &result, const Key &key ) const {
1110         result.release();
1111         return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, &result, /*write=*/false, &do_not_allocate_node );
1112     }
1113 
1114     // Find item and acquire a write lock on the item.
1115     /** Return true if item is found, false otherwise. */
1116     bool find( accessor &result, const Key &key ) {
1117         result.release();
1118         return lookup</*insert*/false>(key, nullptr, &result, /*write=*/true, &do_not_allocate_node);
1119     }
1120 
1121     template <typename K>
1122     typename std::enable_if<hash_compare_is_transparent<K>::value,
1123                             bool>::type find( const_accessor& result, const K& key ) {
1124         result.release();
1125         return lookup</*insert*/false>(key, nullptr, &result, /*write=*/false, &do_not_allocate_node);
1126     }
1127 
1128     template <typename K>
1129     typename std::enable_if<hash_compare_is_transparent<K>::value,
1130                             bool>::type find( accessor& result, const K& key ) {
1131         result.release();
1132         return lookup</*insert*/false>(key, nullptr, &result, /*write=*/true, &do_not_allocate_node);
1133     }
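    /* Heterogeneous lookup sketch: the K-template overloads above participate in
       overload resolution only when hash_compare_type defines an is_transparent
       member type.  A hypothetical comparator that lets a std::string-keyed map be
       queried with a const char* or std::string_view without building a temporary
       key (assumes <string>, <string_view> and <functional> are included):

           struct string_hash_compare {
               using is_transparent = void;
               std::size_t hash(std::string_view s) const {
                   return std::hash<std::string_view>{}(s);
               }
               bool equal(std::string_view a, std::string_view b) const { return a == b; }
           };

           using map_t = tbb::concurrent_hash_map<std::string, int, string_hash_compare>;

           bool contains_some_key(map_t& m) {
               map_t::const_accessor ca;
               return m.find(ca, "some key");   // no temporary std::string is constructed
           }
    */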
1134 
1135     // Insert item (if not already present) and acquire a read lock on the item.
1136     /** Returns true if item is new. */
1137     bool insert( const_accessor &result, const Key &key ) {
1138         result.release();
1139         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<>);
1140     }
1141 
1142     // Insert item (if not already present) and acquire a write lock on the item.
1143     /** Returns true if item is new. */
1144     bool insert( accessor &result, const Key &key ) {
1145         result.release();
1146         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<>);
1147     }
1148 
1149     template <typename K>
1150     typename std::enable_if<hash_compare_is_transparent<K>::value &&
1151                             std::is_constructible<key_type, const K&>::value,
1152                             bool>::type insert( const_accessor& result, const K& key ) {
1153         result.release();
1154         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<K>);
1155     }
1156 
1157     template <typename K>
1158     typename std::enable_if<hash_compare_is_transparent<K>::value &&
1159                             std::is_constructible<key_type, const K&>::value,
1160                             bool>::type insert( accessor& result, const K& key ) {
1161         result.release();
1162         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<K>);
1163     }
1164 
1165     // Insert item by copying if there is no such key present already and acquire a read lock on the item.
1166     /** Returns true if item is new. */
1167     bool insert( const_accessor &result, const value_type &value ) {
1168         result.release();
1169         return lookup</*insert*/true>(value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct);
1170     }
1171 
1172     // Insert item by copying if there is no such key present already and acquire a write lock on the item.
1173     /** Returns true if item is new. */
1174     bool insert( accessor &result, const value_type &value ) {
1175         result.release();
1176         return lookup</*insert*/true>(value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct);
1177     }
1178 
1179     // Insert item by copying if there is no such key present already
1180     /** Returns true if item is inserted. */
1181     bool insert( const value_type &value ) {
1182         return lookup</*insert*/true>(value.first, &value.second, nullptr, /*write=*/false, &allocate_node_copy_construct);
1183     }
1184 
1185     // Insert item by moving if there is no such key present already and acquire a read lock on the item.
1186     /** Returns true if item is new. */
1187     bool insert( const_accessor &result, value_type && value ) {
1188         return generic_move_insert(result, std::move(value));
1189     }
1190 
1191     // Insert item by moving if there is no such key present already and acquire a write lock on the item.
1192     /** Returns true if item is new. */
1193     bool insert( accessor &result, value_type && value ) {
1194         return generic_move_insert(result, std::move(value));
1195     }
1196 
1197     // Insert item by moving if there is no such key present already
1198     /** Returns true if item is inserted. */
1199     bool insert( value_type && value ) {
1200         return generic_move_insert(accessor_not_used(), std::move(value));
1201     }
1202 
1203     // Construct item in place if there is no such key present already and acquire a read lock on the item.
1204     /** Returns true if item is new. */
1205     template <typename... Args>
1206     bool emplace( const_accessor &result, Args&&... args ) {
1207         return generic_emplace(result, std::forward<Args>(args)...);
1208     }
1209 
1210     // Construct item in place if there is no such key present already and acquire a write lock on the item.
1211     /** Returns true if item is new. */
1212     template <typename... Args>
1213     bool emplace( accessor &result, Args&&... args ) {
1214         return generic_emplace(result, std::forward<Args>(args)...);
1215     }
1216 
1217     // Construct item in place if there is no such key present already
1218     /** Returns true if item is inserted. */
1219     template <typename... Args>
1220     bool emplace( Args&&... args ) {
1221         return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1222     }
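         // Illustrative sketch (editorial annotation, not part of this header): the rvalue insert()
         // overloads move from the supplied pair, while emplace() forwards its arguments to construct
         // the element in place. The alias and names below are assumptions made only for this example.
         //
         //     using string_map = tbb::concurrent_hash_map<int, std::string>;
         //
         //     void fill(string_map& m) {
         //         m.insert(string_map::value_type(1, "one"));   // moves the temporary pair in
         //         m.emplace(2, "two");                          // constructs the element in place
         //
         //         string_map::const_accessor ca;
         //         if (m.emplace(ca, 3, "three")) {
         //             // newly inserted; ca holds a read lock on the element here
         //         }
         //     }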
1223 
1224     // Insert range [first, last)
1225     template <typename I>
1226     void insert( I first, I last ) {
1227         for ( ; first != last; ++first )
1228             insert( *first );
1229     }
1230 
1231     // Insert initializer list
1232     void insert( std::initializer_list<value_type> il ) {
1233         insert( il.begin(), il.end() );
1234     }
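         // Illustrative sketch (editorial annotation, not part of this header): bulk insertion from
         // an iterator range or an initializer list; the alias and names are assumptions only, and
         // the usual standard headers (<vector>, <string>, <utility>) are assumed to be included.
         //
         //     using string_map = tbb::concurrent_hash_map<int, std::string>;
         //
         //     std::vector<std::pair<int, std::string>> src = { {1, "one"}, {2, "two"} };
         //     string_map m;
         //     m.insert(src.begin(), src.end());            // range overload
         //     m.insert({ {3, "three"}, {4, "four"} });     // initializer_list overload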
1235 
1236     // Erase item.
1237     /** Return true if the item was erased by this particular call. */
1238     bool erase( const Key &key ) {
1239         return internal_erase(key);
1240     }
1241 
1242     template <typename K>
1243     typename std::enable_if<hash_compare_is_transparent<K>::value,
1244                             bool>::type erase( const K& key ) {
1245         return internal_erase(key);
1246     }
1247 
1248     // Erase item by const_accessor.
1249     /** Return true if the item was erased by this particular call. */
1250     bool erase( const_accessor& item_accessor ) {
1251         return exclude( item_accessor );
1252     }
1253 
1254     // Erase item by accessor.
1255     /** Return true if the item was erased by this particular call. */
1256     bool erase( accessor& item_accessor ) {
1257         return exclude( item_accessor );
1258     }
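         // Illustrative sketch (editorial annotation, not part of this header): erasing by key and
         // erasing through an accessor that already holds the element. Names are assumptions only.
         //
         //     using counter_map = tbb::concurrent_hash_map<std::string, int>;
         //
         //     void drop_if_zero(counter_map& counters, const std::string& word) {
         //         counter_map::accessor acc;
         //         if (counters.find(acc, word) && acc->second == 0) {
         //             counters.erase(acc);           // erases the element the accessor refers to
         //         }
         //     }
         //
         //     bool drop(counter_map& counters, const std::string& word) {
         //         return counters.erase(word);       // true only if this call removed the element
         //     }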
1259 
1260 protected:
1261     template <typename K, typename AllocateNodeType>
1262     node* allocate_node_helper( const K& key, const T* t, AllocateNodeType allocate_node, std::true_type ) {
1263         return allocate_node(base_type::get_allocator(), key, t);
1264     }
1265 
1266     template <typename K, typename AllocateNodeType>
1267     node* allocate_node_helper( const K&, const T*, AllocateNodeType, std::false_type ) {
1268         __TBB_ASSERT(false, "allocate_node_helper with std::false_type should never be called");
1269         return nullptr;
1270     }
1271 
1272     // Insert or find item and optionally acquire a lock on the item.
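         // Roughly, the protocol below is:
         //   1. Hash the key and take the bucket for (hash & mask) under a reader lock.
         //   2. Search the bucket chain; on an insert miss, allocate a node (unless one was supplied)
         //      and upgrade the bucket lock to a writer, re-searching whenever the upgrade or a
         //      subsequent downgrade had to release the mutex.
         //   3. Re-check the mask to detect a concurrent rehash and restart if it changed.
         //   4. Link the new node, then optionally acquire the per-item lock for the caller, falling
         //      back to a backoff-and-restart loop if that lock is contended.
         //   5. Finally, enable a new segment if the insertion triggered growth and free any unused
         //      pre-allocated node.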
1273     template <bool OpInsert, typename K, typename AllocateNodeType>
1274     bool lookup( const K &key, const T *t, const_accessor *result, bool write, AllocateNodeType allocate_node, node *tmp_n  = nullptr)
1275     {
1276         __TBB_ASSERT( !result || !result->my_node, nullptr );
1277         bool return_value;
1278         hashcode_type const h = my_hash_compare.hash( key );
1279         hashcode_type m = this->my_mask.load(std::memory_order_acquire);
1280         segment_index_type grow_segment = 0;
1281         node *n;
1282         restart:
1283         {//lock scope
1284             __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1285             return_value = false;
1286             // get bucket
1287             bucket_accessor b( this, h & m );
1288             // find a node
1289             n = search_bucket( key, b() );
1290             if( OpInsert ) {
1291                 // [opt] insert a key
1292                 if( !n ) {
1293                     if( !tmp_n ) {
1294                         tmp_n = allocate_node_helper(key, t, allocate_node, std::integral_constant<bool, OpInsert>{});
1295                     }
1296                     while ( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1297                         // Re-run the search in case another thread inserted the item during the upgrade
1298                         n = search_bucket(key, b());
1299                         if (this->is_valid(n)) { // unfortunately, it did
1300                             if (!b.downgrade_to_reader()) {
1301                                 // The downgrade had to release and reacquire the mutex,
1302                                 // so re-run the search in case another thread removed the item in the meantime
1303                                 n = search_bucket(key, b());
1304                                 if (!this->is_valid(n)) {
1305                                     // Unfortunately, it did
1306                                     // We need to try upgrading to writer again
1307                                     continue;
1308                                 }
1309                             }
1310                             goto exists;
1311                         }
1312                     }
1313 
1314                     if( this->check_mask_race(h, m) )
1315                         goto restart; // b.release() is done in ~b().
1316                     // insert and set flag to grow the container
1317                     grow_segment = this->insert_new_node( b(), n = tmp_n, m );
1318                     tmp_n = nullptr;
1319                     return_value = true;
1320                 }
1321             } else { // find or count
1322                 if( !n ) {
1323                     if( this->check_mask_race( h, m ) )
1324                         goto restart; // b.release() is done in ~b(). TODO: replace by continue
1325                     return false;
1326                 }
1327                 return_value = true;
1328             }
1329         exists:
1330             if( !result ) goto check_growth;
1331             // TODO: the following seems like a generic/regular operation
1332             // acquire the item
1333             if( !result->try_acquire( n->mutex, write ) ) {
1334                 for( tbb::detail::atomic_backoff backoff(true);; ) {
1335                     if( result->try_acquire( n->mutex, write ) ) break;
1336                     if( !backoff.bounded_pause() ) {
1337                         // the wait is taking too long; restart the operation
1338                         b.release();
1339                         __TBB_ASSERT( !OpInsert || !return_value, "Can't acquire new item in locked bucket?" );
1340                         yield();
1341                         m = this->my_mask.load(std::memory_order_acquire);
1342                         goto restart;
1343                     }
1344                 }
1345             }
1346         }//lock scope
1347         result->my_node = n;
1348         result->my_hash = h;
1349     check_growth:
1350         // [opt] grow the container
1351         if( grow_segment ) {
1352             this->enable_segment( grow_segment );
1353         }
1354         if( tmp_n ) // can be non-null only when OpInsert
1355             delete_node( tmp_n );
1356         return return_value;
1357     }
1358 
1359     struct accessor_not_used { void release() {} };
1360     friend const_accessor* accessor_location( accessor_not_used const& ){ return nullptr;}
1361     friend const_accessor* accessor_location( const_accessor & a )      { return &a;}
1362 
1363     friend bool is_write_access_needed( accessor const& )           { return true;}
1364     friend bool is_write_access_needed( const_accessor const& )     { return false;}
1365     friend bool is_write_access_needed( accessor_not_used const& )  { return false;}
1366 
1367     template <typename Accessor>
1368     bool generic_move_insert( Accessor && result, value_type && value ) {
1369         result.release();
1370         return lookup</*insert*/true>(value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct);
1371     }
1372 
1373     template <typename Accessor, typename... Args>
1374     bool generic_emplace( Accessor && result, Args &&... args ) {
1375         result.release();
1376         node * node_ptr = create_node(base_type::get_allocator(), std::forward<Args>(args)...);
1377         return lookup</*insert*/true>(node_ptr->value().first, nullptr, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr);
1378     }
1379 
1380     // delete item by accessor
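         // Roughly: lock the bucket for writing, unlink the node the accessor refers to (restarting
         // if a concurrent rehash moved the bucket), make sure the accessor holds the item
         // exclusively, then release it and reclaim the node.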
1381     bool exclude( const_accessor &item_accessor ) {
1382         __TBB_ASSERT( item_accessor.my_node, nullptr );
1383         node_base *const exclude_node = item_accessor.my_node;
1384         hashcode_type const hash = item_accessor.my_hash;
1385         hashcode_type mask = this->my_mask.load(std::memory_order_acquire);
1386         do {
1387             // get bucket
1388             bucket_accessor b( this, hash & mask, /*writer=*/true );
1389             node_base* prev = nullptr;
1390             node_base* curr = b()->node_list.load(std::memory_order_relaxed);
1391 
1392             while (curr && curr != exclude_node) {
1393                 prev = curr;
1394                 curr = curr->next;
1395             }
1396 
1397             if (curr == nullptr) { // someone else was first
1398                 if (this->check_mask_race(hash, mask))
1399                     continue;
1400                 item_accessor.release();
1401                 return false;
1402             }
1403             __TBB_ASSERT( curr == exclude_node, nullptr );
1404             // remove from container
1405             if (prev == nullptr) {
1406                 b()->node_list.store(curr->next, std::memory_order_relaxed);
1407             } else {
1408                 prev->next = curr->next;
1409             }
1410 
1411             this->my_size--;
1412             break;
1413         } while(true);
1414         if (!item_accessor.is_writer()) { // need to get exclusive lock
1415             item_accessor.upgrade_to_writer(); // return value means nothing here
1416         }
1417 
1418         item_accessor.release();
1419         delete_node(exclude_node); // Only one thread can delete it
1420         return true;
1421     }
1422 
1423     template <typename K>
1424     bool internal_erase( const K& key ) {
1425         node_base *erase_node;
1426         hashcode_type const hash = my_hash_compare.hash(key);
1427         hashcode_type mask = this->my_mask.load(std::memory_order_acquire);
1428     restart:
1429         {//lock scope
1430             // get bucket
1431             bucket_accessor b( this, hash & mask );
1432         search:
1433             node_base* prev = nullptr;
1434             erase_node = b()->node_list.load(std::memory_order_relaxed);
1435             while (this->is_valid(erase_node) && !my_hash_compare.equal(key, static_cast<node*>(erase_node)->value().first ) ) {
1436                 prev = erase_node;
1437                 erase_node = erase_node->next;
1438             }
1439 
1440             if (erase_node == nullptr) { // not found, but mask could be changed
1441                 if (this->check_mask_race(hash, mask))
1442                     goto restart;
1443                 return false;
1444             } else if (!b.is_writer() && !b.upgrade_to_writer()) {
1445                 if (this->check_mask_race(hash, mask)) // contended upgrade, check mask
1446                     goto restart;
1447                 goto search;
1448             }
1449 
1450             // remove from container
1451             if (prev == nullptr) {
1452                 b()->node_list.store(erase_node->next, std::memory_order_relaxed);
1453             } else {
1454                 prev->next = erase_node->next;
1455             }
1456             this->my_size--;
1457         }
1458         {
1459             typename node::scoped_type item_locker( erase_node->mutex, /*write=*/true );
1460         }
1461         // note: no other thread should be attempting to acquire this mutex again; do not try to upgrade a const_accessor!
1462         delete_node(erase_node); // Only one thread can delete it due to write lock on the bucket
1463         return true;
1464     }
1465 
1466     // Returns a pair of iterators delimiting the range of items that match the key (an empty range [end_, end_) if there is no such item)
1467     template <typename K, typename I>
1468     std::pair<I, I> internal_equal_range( const K& key, I end_ ) const {
1469         hashcode_type h = my_hash_compare.hash( key );
1470         hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
1471         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1472         h &= m;
1473         bucket *b = this->get_bucket( h );
1474         while (rehash_required(b->node_list.load(std::memory_order_relaxed))) {
1475             m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit
1476             b = this->get_bucket( h &= m );
1477         }
1478         node *n = search_bucket( key, b );
1479         if( !n )
1480             return std::make_pair(end_, end_);
1481         iterator lower(*this, h, b, n), upper(lower);
1482         return std::make_pair(lower, ++upper);
1483     }
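         // Illustrative sketch (editorial annotation, not part of this header): since keys are unique,
         // equal_range() yields at most one element; like all iterator-based inspection it is intended
         // for serial use, not concurrently with erasure. Names are assumptions only.
         //
         //     using counter_map = tbb::concurrent_hash_map<std::string, int>;
         //
         //     int get_or_minus_one(const counter_map& counters, const std::string& word) {
         //         auto range = counters.equal_range(word);
         //         return range.first == range.second ? -1 : range.first->second;
         //     }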
1484 
1485     // Copy "source" to *this, where *this must start out empty.
1486     void internal_copy( const concurrent_hash_map& source ) {
1487         hashcode_type mask = source.my_mask.load(std::memory_order_relaxed);
1488         if( this->my_mask.load(std::memory_order_relaxed) == mask ) { // optimized version
1489             this->reserve(source.my_size.load(std::memory_order_relaxed)); // TODO: load_factor?
1490             bucket *dst = nullptr, *src = nullptr;
1491             bool rehashing_required = false;
1492             for( hashcode_type k = 0; k <= mask; k++ ) {
1493             if( k & (k-2) ) ++dst, ++src; // not the beginning of a segment
1494                 else { dst = this->get_bucket( k ); src = source.get_bucket( k ); }
1495                 __TBB_ASSERT(!rehash_required(dst->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table");
1496                 node *n = static_cast<node*>( src->node_list.load(std::memory_order_relaxed) );
1497                 if (rehash_required(n)) { // source is not rehashed, items are in previous buckets
1498                     rehashing_required = true;
1499                     dst->node_list.store(reinterpret_cast<node_base*>(rehash_req_flag), std::memory_order_relaxed);
1500                 } else for(; n; n = static_cast<node*>( n->next ) ) {
1501                     node* node_ptr = create_node(base_type::get_allocator(), n->value().first, n->value().second);
1502                     this->add_to_bucket( dst, node_ptr);
1503                     this->my_size.fetch_add(1, std::memory_order_relaxed);
1504                 }
1505             }
1506             if( rehashing_required ) rehash();
1507         } else internal_copy(source.begin(), source.end(), source.my_size.load(std::memory_order_relaxed));
1508     }
1509 
1510     template <typename I>
1511     void internal_copy( I first, I last, size_type reserve_size ) {
1512         this->reserve(reserve_size); // TODO: load_factor?
1513         hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
1514         for(; first != last; ++first) {
1515             hashcode_type h = my_hash_compare.hash( (*first).first );
1516             bucket *b = this->get_bucket( h & m );
1517             __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table");
1518             node* node_ptr = create_node(base_type::get_allocator(), (*first).first, (*first).second);
1519             this->add_to_bucket( b, node_ptr );
1520             ++this->my_size; // TODO: replace by non-atomic op
1521         }
1522     }
1523 
1524     void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type&,
1525                                                 /*is_always_equal=*/std::true_type )
1526     {
1527         this->internal_move(std::move(other));
1528     }
1529 
1530     void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type& a,
1531                                                 /*is_always_equal=*/std::false_type )
1532     {
1533         if (a == other.get_allocator()){
1534             this->internal_move(std::move(other));
1535         } else {
1536             try_call( [&] {
1537                 internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()),
1538                     other.size());
1539             }).on_exception( [&] {
1540                 this->clear();
1541             });
1542         }
1543     }
1544 
1545     void internal_move_assign( concurrent_hash_map&& other,
1546         /*is_always_equal || POCMA = */std::true_type)
1547     {
1548         this->internal_move(std::move(other));
1549     }
1550 
1551     void internal_move_assign(concurrent_hash_map&& other, /*is_always_equal=*/ std::false_type) {
1552         if (this->my_allocator == other.my_allocator) {
1553             this->internal_move(std::move(other));
1554         } else {
1555             //do per element move
1556             internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()),
1557                 other.size());
1558         }
1559     }
1560 
1561     void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::true_type) {
1562         this->internal_swap_content(other);
1563     }
1564 
1565     void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::false_type) {
1566         __TBB_ASSERT(this->my_allocator == other.my_allocator, nullptr);
1567         this->internal_swap_content(other);
1568     }
1569 
1570     // Fast find when no concurrent erasure is used. For internal use inside TBB only!
1571     /** Return pointer to item with given key, or nullptr if no such item exists.
1572         Must not be called concurrently with erasure operations. */
1573     const_pointer internal_fast_find( const Key& key ) const {
1574         hashcode_type h = my_hash_compare.hash( key );
1575         hashcode_type m = this->my_mask.load(std::memory_order_acquire);
1576         node *n;
1577     restart:
1578         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1579         bucket *b = this->get_bucket( h & m );
1580         // TODO: actually, notification is unnecessary here, just hiding double-check
1581         if (rehash_required(b->node_list.load(std::memory_order_acquire)))
1582         {
1583             typename bucket::scoped_type lock;
1584             if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1585                 if (rehash_required(b->node_list.load(std::memory_order_relaxed)))
1586                     const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1587             }
1588             else lock.acquire( b->mutex, /*write=*/false );
1589             __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr);
1590         }
1591         n = search_bucket( key, b );
1592         if( n )
1593             return n->storage();
1594         else if( this->check_mask_race( h, m ) )
1595             goto restart;
1596         return nullptr;
1597     }
1598 };
1599 
1600 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1601 template <typename It,
1602           typename HashCompare = d1::tbb_hash_compare<iterator_key_t<It>>,
1603           typename Alloc = tbb_allocator<iterator_alloc_pair_t<It>>,
1604           typename = std::enable_if_t<is_input_iterator_v<It>>,
1605           typename = std::enable_if_t<is_allocator_v<Alloc>>,
1606           typename = std::enable_if_t<!is_allocator_v<HashCompare>>>
1607 concurrent_hash_map( It, It, HashCompare = HashCompare(), Alloc = Alloc() )
1608 -> concurrent_hash_map<iterator_key_t<It>, iterator_mapped_t<It>, HashCompare, Alloc>;
1609 
1610 template <typename It, typename Alloc,
1611           typename = std::enable_if_t<is_input_iterator_v<It>>,
1612           typename = std::enable_if_t<is_allocator_v<Alloc>>>
1613 concurrent_hash_map( It, It, Alloc )
1614 -> concurrent_hash_map<iterator_key_t<It>, iterator_mapped_t<It>, d1::tbb_hash_compare<iterator_key_t<It>>, Alloc>;
1615 
1616 template <typename Key, typename T,
1617           typename HashCompare = d1::tbb_hash_compare<std::remove_const_t<Key>>,
1618           typename Alloc = tbb_allocator<std::pair<const Key, T>>,
1619           typename = std::enable_if_t<is_allocator_v<Alloc>>,
1620           typename = std::enable_if_t<!is_allocator_v<HashCompare>>>
1621 concurrent_hash_map( std::initializer_list<std::pair<Key, T>>, HashCompare = HashCompare(), Alloc = Alloc() )
1622 -> concurrent_hash_map<std::remove_const_t<Key>, T, HashCompare, Alloc>;
1623 
1624 template <typename Key, typename T, typename Alloc,
1625           typename = std::enable_if_t<is_allocator_v<Alloc>>>
1626 concurrent_hash_map( std::initializer_list<std::pair<Key, T>>, Alloc )
1627 -> concurrent_hash_map<std::remove_const_t<Key>, T, d1::tbb_hash_compare<std::remove_const_t<Key>>, Alloc>;
1628 
1629 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
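     // Illustrative sketch (editorial annotation, not part of this header): with the deduction guides
     // above, C++17 class template argument deduction infers the key and mapped types from an iterator
     // range or an initializer list. Names are assumptions only, and the usual standard headers
     // (<vector>, <string>, <utility>) are assumed to be included.
     //
     //     std::vector<std::pair<int, std::string>> src = { {1, "one"}, {2, "two"} };
     //     tbb::concurrent_hash_map from_range(src.begin(), src.end());                 // deduces <int, std::string>
     //     tbb::concurrent_hash_map from_list({ std::pair{3, std::string("three")} });  // deduces <int, std::string>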
1630 
1631 template <typename Key, typename T, typename HashCompare, typename A1, typename A2>
1632 inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
1633     if(a.size() != b.size()) return false;
1634     typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
1635     typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
1636     for(; i != i_end; ++i) {
1637         j = b.equal_range(i->first).first;
1638         if( j == j_end || !(i->second == j->second) ) return false;
1639     }
1640     return true;
1641 }
1642 
1643 #if !__TBB_CPP20_COMPARISONS_PRESENT
1644 template <typename Key, typename T, typename HashCompare, typename A1, typename A2>
1645 inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
1646 {    return !(a == b); }
1647 #endif // !__TBB_CPP20_COMPARISONS_PRESENT
1648 
1649 template <typename Key, typename T, typename HashCompare, typename A>
1650 inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
1651 {    a.swap( b ); }
1652 
1653 } // namespace d2
1654 } // namespace detail
1655 
1656 inline namespace v1 {
1657     using detail::split;
1658     using detail::d2::concurrent_hash_map;
1659     using detail::d1::tbb_hash_compare;
1660 } // namespace v1
1661 
1662 } // namespace tbb
1663 
1664 #endif /* __TBB_concurrent_hash_map_H */