0001 /*
0002     Copyright (c) 2005-2020 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_concurrent_hash_map_H
0018 #define __TBB_concurrent_hash_map_H
0019 
0020 #define __TBB_concurrent_hash_map_H_include_area
0021 #include "internal/_warning_suppress_enable_notice.h"
0022 
0023 #include "tbb_stddef.h"
0024 #include <iterator>
0025 #include <utility>      // Need std::pair
0026 #include <cstring>      // Need std::memset
0027 #include __TBB_STD_SWAP_HEADER
0028 
0029 #include "tbb_allocator.h"
0030 #include "spin_rw_mutex.h"
0031 #include "atomic.h"
0032 #include "tbb_exception.h"
0033 #include "tbb_profiling.h"
0034 #include "aligned_space.h"
0035 #include "internal/_tbb_hash_compare_impl.h"
0036 #include "internal/_template_helpers.h"
0037 #include "internal/_allocator_traits.h"
0038 #if __TBB_INITIALIZER_LISTS_PRESENT
0039 #include <initializer_list>
0040 #endif
0041 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
0042 #include <typeinfo>
0043 #endif
0044 #if __TBB_STATISTICS
0045 #include <stdio.h>
0046 #endif
0047 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
0048 // The definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT
0049 // on most platforms; the tuple presence macro was added separately for logical correctness.
0050 #include <tuple>
0051 #endif
0052 
0053 namespace tbb {
0054 
0055 namespace interface5 {
0056 
0057     template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
0058     class concurrent_hash_map;
0059 
0060     //! @cond INTERNAL
0061     namespace internal {
0062     using namespace tbb::internal;
0063 
0064 
0065     //! Type of a hash code.
0066     typedef size_t hashcode_t;
0067     //! Node base type
0068     struct hash_map_node_base : tbb::internal::no_copy {
0069         //! Mutex type
0070         typedef spin_rw_mutex mutex_t;
0071         //! Scoped lock type for mutex
0072         typedef mutex_t::scoped_lock scoped_t;
0073         //! Next node in chain
0074         hash_map_node_base *next;
0075         mutex_t mutex;
0076     };
0077     //! Incompleteness flag value
0078     static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
0079     //! Rehashed empty bucket flag
0080     static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
0081     //! base class of concurrent_hash_map
0082     class hash_map_base {
0083     public:
0084         //! Size type
0085         typedef size_t size_type;
0086         //! Type of a hash code.
0087         typedef size_t hashcode_t;
0088         //! Segment index type
0089         typedef size_t segment_index_t;
0090         //! Node base type
0091         typedef hash_map_node_base node_base;
0092         //! Bucket type
0093         struct bucket : tbb::internal::no_copy {
0094             //! Mutex type for buckets
0095             typedef spin_rw_mutex mutex_t;
0096             //! Scoped lock type for mutex
0097             typedef mutex_t::scoped_lock scoped_t;
0098             mutex_t mutex;
0099             node_base *node_list;
0100         };
0101         //! Count of embedded segments
0102         static size_type const embedded_block = 1;
0103         //! Count of buckets in the embedded segments
0104         static size_type const embedded_buckets = 1<<embedded_block;
0105         //! Count of segments in the first block (including the embedded ones)
0106         static size_type const first_block = 8; // works well with bucket size 16, so the segment allocations are multiples of 4096
0107         //! Size of a pointer / table size
0108         static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
0109         //! Segment pointer
0110         typedef bucket *segment_ptr_t;
0111         //! Segment pointers table type
0112         typedef segment_ptr_t segments_table_t[pointers_per_table];
0113         //! Hash mask = sum of allocated segment sizes - 1
0114         atomic<hashcode_t> my_mask;
0115         //! Segment pointers table. Also prevents false sharing between my_mask and my_size
0116         segments_table_t my_table;
0117         //! Size of container in stored items
0118         atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
0119         //! Zero segment
0120         bucket my_embedded_segment[embedded_buckets];
0121 #if __TBB_STATISTICS
0122         atomic<unsigned> my_info_resizes; // concurrent ones
0123         mutable atomic<unsigned> my_info_restarts; // race collisions
0124         atomic<unsigned> my_info_rehashes;  // invocations of rehash_bucket
0125 #endif
0126         //! Constructor
0127         hash_map_base() {
0128             std::memset(my_table, 0, sizeof(my_table));
0129             my_mask = 0;
0130             my_size = 0;
0131             std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
0132             for( size_type i = 0; i < embedded_block; i++ ) // fill the table
0133                 my_table[i] = my_embedded_segment + segment_base(i);
0134             my_mask = embedded_buckets - 1;
0135             __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
0136 #if __TBB_STATISTICS
0137             my_info_resizes = 0; // concurrent ones
0138             my_info_restarts = 0; // race collisions
0139             my_info_rehashes = 0;  // invocations of rehash_bucket
0140 #endif
0141         }
0142 
0143         //! @return segment index of given index in the array
0144         static segment_index_t segment_index_of( size_type index ) {
0145             return segment_index_t( __TBB_Log2( index|1 ) );
0146         }
0147 
0148         //! @return the first array index of given segment
0149         static segment_index_t segment_base( segment_index_t k ) {
0150             return (segment_index_t(1)<<k & ~segment_index_t(1));
0151         }
0152 
0153         //! @return segment size except for @arg k == 0
0154         static size_type segment_size( segment_index_t k ) {
0155             return size_type(1)<<k; // fake value for k==0
0156         }
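        // Worked illustration of the segmented layout defined by the helpers above
        // (the numbers follow directly from the formulas; shown for clarity):
        //   bucket indices 0..1  -> segment 0 (embedded), segment_base(0) == 0
        //   bucket indices 2..3  -> segment 1, segment_base(1) == 2, segment_size(1) == 2
        //   bucket indices 4..7  -> segment 2, segment_base(2) == 4, segment_size(2) == 4
        //   bucket indices 8..15 -> segment 3, segment_base(3) == 8, segment_size(3) == 8
        // For example, segment_index_of(5) == __TBB_Log2(5|1) == 2 and 5 - segment_base(2) == 1,
        // so bucket 5 lives at offset 1 of segment 2.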
0157 
0158         //! @return true if @arg ptr is a valid pointer
0159         static bool is_valid( void *ptr ) {
0160             return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
0161         }
0162 
0163         //! Initialize buckets
0164         static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
0165             if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
0166             else for(size_type i = 0; i < sz; i++, ptr++) {
0167                 *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
0168                 ptr->node_list = rehash_req;
0169             }
0170         }
0171 
0172         //! Add node @arg n to bucket @arg b
0173         static void add_to_bucket( bucket *b, node_base *n ) {
0174             __TBB_ASSERT(b->node_list != rehash_req, NULL);
0175             n->next = b->node_list;
0176             b->node_list = n; // it's under lock and the flag is set
0177         }
0178 
0179         //! Exception safety helper
0180         struct enable_segment_failsafe : tbb::internal::no_copy {
0181             segment_ptr_t *my_segment_ptr;
0182             enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
0183             ~enable_segment_failsafe() {
0184                 if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
0185             }
0186         };
0187 
0188         //! Enable segment
0189         template<typename Allocator>
0190         void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
0191             typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
0192             typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
0193             bucket_allocator_type bucket_allocator(allocator);
0194             __TBB_ASSERT( k, "Zero segment must be embedded" );
0195             enable_segment_failsafe watchdog( my_table, k );
0196             size_type sz;
0197             __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
0198             if( k >= first_block ) {
0199                 sz = segment_size( k );
0200                 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
0201                 init_buckets( ptr, sz, is_initial );
0202                 itt_hide_store_word( my_table[k], ptr );
0203                 sz <<= 1;// double it to get entire capacity of the container
0204             } else { // the first block
0205                 __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
0206                 sz = segment_size( first_block );
0207                 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
0208                 init_buckets( ptr, sz - embedded_buckets, is_initial );
0209                 ptr -= segment_base(embedded_block);
0210                 for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
0211                     itt_hide_store_word( my_table[i], ptr + segment_base(i) );
0212             }
0213             itt_store_word_with_release( my_mask, sz-1 );
0214             watchdog.my_segment_ptr = 0;
0215         }
0216 
0217         template<typename Allocator>
0218         void delete_segment(segment_index_t s, const Allocator& allocator) {
0219             typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
0220             typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
0221             bucket_allocator_type bucket_allocator(allocator);
0222             segment_ptr_t buckets_ptr = my_table[s];
0223             size_type sz = segment_size( s ? s : 1 );
0224 
0225             if( s >= first_block) // the first segment or the next
0226                 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
0227             else if( s == embedded_block && embedded_block != first_block )
0228                 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
0229                                                     segment_size(first_block) - embedded_buckets);
0230             if( s >= embedded_block ) my_table[s] = 0;
0231         }
0232 
0233         //! Get bucket by (masked) hashcode
0234         bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
0235             segment_index_t s = segment_index_of( h );
0236             h -= segment_base(s);
0237             segment_ptr_t seg = my_table[s];
0238             __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
0239             return &seg[h];
0240         }
0241 
0242         // internal serial rehashing helper
0243         void mark_rehashed_levels( hashcode_t h ) throw () {
0244             segment_index_t s = segment_index_of( h );
0245             while( segment_ptr_t seg = my_table[++s] )
0246                 if( seg[h].node_list == rehash_req ) {
0247                     seg[h].node_list = empty_rehashed;
0248                     mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
0249                 }
0250         }
0251 
0252         //! Check for mask race
0253         // Splitting into two functions should help inlining
0254         inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
0255             hashcode_t m_now, m_old = m;
0256             m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
0257             if( m_old != m_now )
0258                 return check_rehashing_collision( h, m_old, m = m_now );
0259             return false;
0260         }
0261 
0262         //! Process mask race, check for rehashing collision
0263         bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
0264             __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
0265             if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
0266                 // condition above proves that 'h' has some other bits set beside 'm_old'
0267                 // find next applicable mask after m_old    //TODO: look at bsl instruction
0268                 for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
0269                     ;
0270                 m_old = (m_old<<1) - 1; // get full mask from a bit
0271                 __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
0272                 // check whether it is rehashing/ed
0273                 if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
0274                 {
0275 #if __TBB_STATISTICS
0276                     my_info_restarts++; // race collisions
0277 #endif
0278                     return true;
0279                 }
0280             }
0281             return false;
0282         }
0283 
0284         //! Insert a node and check for load factor. @return segment index to enable.
0285         segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
0286             size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
0287             add_to_bucket( b, n );
0288             // check load factor
0289             if( sz >= mask ) { // TODO: add custom load_factor
0290                 segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
0291                 __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
0292                 static const segment_ptr_t is_allocating = (segment_ptr_t)2;
0293                 if( !itt_hide_load_word(my_table[new_seg])
0294                   && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
0295                     return new_seg; // The value must be processed
0296             }
0297             return 0;
0298         }
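        // Growth illustration (the numbers follow from insert_new_node above; the
        // returned segment index is then enabled by the caller): with my_mask == 255,
        // i.e. 256 buckets available, the check sz >= mask first fires when the 255th
        // item is inserted; new_seg == __TBB_Log2(256) == 8, so segment 8 (256 more
        // buckets) gets allocated and the capacity doubles to 512.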
0299 
0300         //! Prepare enough segments for number of buckets
0301         template<typename Allocator>
0302         void reserve(size_type buckets, const Allocator& allocator) {
0303             if( !buckets-- ) return;
0304             bool is_initial = !my_size;
0305             for( size_type m = my_mask; buckets > m; m = my_mask )
0306                 enable_segment( segment_index_of( m+1 ), allocator, is_initial );
0307         }
0308         //! Swap hash_map_bases
0309         void internal_swap(hash_map_base &table) {
0310             using std::swap;
0311             swap(this->my_mask, table.my_mask);
0312             swap(this->my_size, table.my_size);
0313             for(size_type i = 0; i < embedded_buckets; i++)
0314                 swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
0315             for(size_type i = embedded_block; i < pointers_per_table; i++)
0316                 swap(this->my_table[i], table.my_table[i]);
0317         }
0318 
0319 #if __TBB_CPP11_RVALUE_REF_PRESENT
0320         void internal_move(hash_map_base&& other) {
0321             my_mask = other.my_mask;
0322             other.my_mask = embedded_buckets - 1;
0323             my_size = other.my_size;
0324             other.my_size = 0;
0325 
0326             for(size_type i = 0; i < embedded_buckets; ++i) {
0327                 my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
0328                 other.my_embedded_segment[i].node_list = NULL;
0329             }
0330 
0331             for(size_type i = embedded_block; i < pointers_per_table; ++i) {
0332                 my_table[i] = other.my_table[i];
0333                 other.my_table[i] = NULL;
0334             }
0335         }
0336 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
0337     };
0338 
0339     template<typename Iterator>
0340     class hash_map_range;
0341 
0342     //! Meets requirements of a forward iterator for STL
0343     /** Value is either the T or const T type of the container.
0344         @ingroup containers */
0345     template<typename Container, typename Value>
0346     class hash_map_iterator
0347         : public std::iterator<std::forward_iterator_tag,Value>
0348     {
0349         typedef Container map_type;
0350         typedef typename Container::node node;
0351         typedef hash_map_base::node_base node_base;
0352         typedef hash_map_base::bucket bucket;
0353 
0354         template<typename C, typename T, typename U>
0355         friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
0356 
0357         template<typename C, typename T, typename U>
0358         friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
0359 
0360         template<typename C, typename T, typename U>
0361         friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
0362 
0363         template<typename C, typename U>
0364         friend class hash_map_iterator;
0365 
0366         template<typename I>
0367         friend class hash_map_range;
0368 
0369         void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
0370             size_t k = my_index+1;
0371             __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
0372             while( k <= my_map->my_mask ) {
0373                 // Following test uses 2's-complement wizardry
0374                 if( k&(k-2) ) // not the beginning of a segment
0375                     ++my_bucket;
0376                 else my_bucket = my_map->get_bucket( k );
0377                 my_node = static_cast<node*>( my_bucket->node_list );
0378                 if( hash_map_base::is_valid(my_node) ) {
0379                     my_index = k; return;
0380                 }
0381                 ++k;
0382             }
0383             my_bucket = 0; my_node = 0; my_index = k; // the end
0384         }
0385 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
0386         template<typename Key, typename T, typename HashCompare, typename A>
0387         friend class interface5::concurrent_hash_map;
0388 #else
0389     public: // workaround
0390 #endif
0391         //! concurrent_hash_map over which we are iterating.
0392         const Container *my_map;
0393 
0394         //! Index in hash table for current item
0395         size_t my_index;
0396 
0397         //! Pointer to bucket
0398         const bucket *my_bucket;
0399 
0400         //! Pointer to node that has current item
0401         node *my_node;
0402 
0403         hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
0404 
0405     public:
0406         //! Construct undefined iterator
0407         hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
0408         hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
0409             my_map(other.my_map),
0410             my_index(other.my_index),
0411             my_bucket(other.my_bucket),
0412             my_node(other.my_node)
0413         {}
0414 
0415         hash_map_iterator& operator=( const hash_map_iterator<Container,typename Container::value_type> &other ) {
0416             my_map = other.my_map;
0417             my_index = other.my_index;
0418             my_bucket = other.my_bucket;
0419             my_node = other.my_node;
0420             return *this;
0421         }
0422         Value& operator*() const {
0423             __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
0424             return my_node->value();
0425         }
0426         Value* operator->() const {return &operator*();}
0427         hash_map_iterator& operator++();
0428 
0429         //! Post increment
0430         hash_map_iterator operator++(int) {
0431             hash_map_iterator old(*this);
0432             operator++();
0433             return old;
0434         }
0435     };
0436 
0437     template<typename Container, typename Value>
0438     hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
0439         my_map(&map),
0440         my_index(index),
0441         my_bucket(b),
0442         my_node( static_cast<node*>(n) )
0443     {
0444         if( b && !hash_map_base::is_valid(n) )
0445             advance_to_next_bucket();
0446     }
0447 
0448     template<typename Container, typename Value>
0449     hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
0450         my_node = static_cast<node*>( my_node->next );
0451         if( !my_node ) advance_to_next_bucket();
0452         return *this;
0453     }
0454 
0455     template<typename Container, typename T, typename U>
0456     bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
0457         return i.my_node == j.my_node && i.my_map == j.my_map;
0458     }
0459 
0460     template<typename Container, typename T, typename U>
0461     bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
0462         return i.my_node != j.my_node || i.my_map != j.my_map;
0463     }
0464 
0465     //! Range class used with concurrent_hash_map
0466     /** @ingroup containers */
0467     template<typename Iterator>
0468     class hash_map_range {
0469         typedef typename Iterator::map_type map_type;
0470         Iterator my_begin;
0471         Iterator my_end;
0472         mutable Iterator my_midpoint;
0473         size_t my_grainsize;
0474         //! Set my_midpoint to point approximately half way between my_begin and my_end.
0475         void set_midpoint() const;
0476         template<typename U> friend class hash_map_range;
0477     public:
0478         //! Type for size of a range
0479         typedef std::size_t size_type;
0480         typedef typename Iterator::value_type value_type;
0481         typedef typename Iterator::reference reference;
0482         typedef typename Iterator::difference_type difference_type;
0483         typedef Iterator iterator;
0484 
0485         //! True if range is empty.
0486         bool empty() const {return my_begin==my_end;}
0487 
0488         //! True if range can be partitioned into two subranges.
0489         bool is_divisible() const {
0490             return my_midpoint!=my_end;
0491         }
0492         //! Split range.
0493         hash_map_range( hash_map_range& r, split ) :
0494             my_end(r.my_end),
0495             my_grainsize(r.my_grainsize)
0496         {
0497             r.my_end = my_begin = r.my_midpoint;
0498             __TBB_ASSERT( !empty(), "Splitting a range that is not divisible" );
0499             __TBB_ASSERT( !r.empty(), "Splitting a range that is not divisible" );
0500             set_midpoint();
0501             r.set_midpoint();
0502         }
0503         //! type conversion
0504         template<typename U>
0505         hash_map_range( hash_map_range<U>& r) :
0506             my_begin(r.my_begin),
0507             my_end(r.my_end),
0508             my_midpoint(r.my_midpoint),
0509             my_grainsize(r.my_grainsize)
0510         {}
0511         //! Init range with container and grainsize specified
0512         hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
0513             my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
0514             my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
0515             my_grainsize( grainsize_ )
0516         {
0517             __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
0518             set_midpoint();
0519         }
0520         const Iterator& begin() const {return my_begin;}
0521         const Iterator& end() const {return my_end;}
0522         //! The grain size for this range.
0523         size_type grainsize() const {return my_grainsize;}
0524     };
0525 
0526     template<typename Iterator>
0527     void hash_map_range<Iterator>::set_midpoint() const {
0528         // Split by groups of nodes
0529         size_t m = my_end.my_index-my_begin.my_index;
0530         if( m > my_grainsize ) {
0531             m = my_begin.my_index + m/2u;
0532             hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
0533             my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
0534         } else {
0535             my_midpoint = my_end;
0536         }
0537         __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
0538             "my_begin is after my_midpoint" );
0539         __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
0540             "my_midpoint is after my_end" );
0541         __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
0542             "[my_begin, my_midpoint) range should not be empty" );
0543     }
0544 
0545     } // internal
0546 //! @endcond
0547 
0548 #if _MSC_VER && !defined(__INTEL_COMPILER)
0549     // Suppress "conditional expression is constant" warning.
0550     #pragma warning( push )
0551     #pragma warning( disable: 4127 )
0552 #endif
0553 
0554 //! Unordered map from Key to T.
0555 /** concurrent_hash_map is an associative container with concurrent access.
0556 
0557 @par Compatibility
0558     The class meets all Container Requirements from the C++ Standard (see ISO/IEC 14882:2003(E), clause 23.1).
0559 
0560 @par Exception Safety
0561     - The hash function is not permitted to throw an exception. User-defined types Key and T must not throw an exception from their destructors.
0562     - If an exception happens during an insert() operation, it has no effect (unless the exception is raised by the HashCompare::hash() function during grow_segment).
0563     - If an exception happens during an operator=() operation, the container may contain only a part of the source items, and the methods size() and empty() can return wrong results.
0564 
0565 @par Changes since TBB 2.1
0566     - Replaced internal algorithm and data structure. Patent is pending.
0567     - Added a bucket count argument to the constructor
0568 
0569 @par Changes since TBB 2.0
0570     - Fixed exception-safety
0571     - Added template argument for allocator
0572     - Added allocator argument in constructors
0573     - Added constructor from a range of iterators
0574     - Added several new overloaded insert() methods
0575     - Added get_allocator()
0576     - Added swap()
0577     - Added count()
0578     - Added overloaded erase(accessor &) and erase(const_accessor&)
0579     - Added equal_range() [const]
0580     - Added [const_]pointer, [const_]reference, and allocator_type types
0581     - Added global functions: operator==(), operator!=(), and swap()
0582 
0583     @ingroup containers */
0584 template<typename Key, typename T, typename HashCompare, typename Allocator>
0585 class concurrent_hash_map : protected internal::hash_map_base {
0586     template<typename Container, typename Value>
0587     friend class internal::hash_map_iterator;
0588 
0589     template<typename I>
0590     friend class internal::hash_map_range;
0591 
0592 public:
0593     typedef Key key_type;
0594     typedef T mapped_type;
0595     typedef std::pair<const Key,T> value_type;
0596     typedef hash_map_base::size_type size_type;
0597     typedef ptrdiff_t difference_type;
0598     typedef value_type *pointer;
0599     typedef const value_type *const_pointer;
0600     typedef value_type &reference;
0601     typedef const value_type &const_reference;
0602     typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
0603     typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
0604     typedef internal::hash_map_range<iterator> range_type;
0605     typedef internal::hash_map_range<const_iterator> const_range_type;
0606     typedef Allocator allocator_type;
0607 
0608 protected:
0609     friend class const_accessor;
0610     class node;
0611     typedef typename tbb::internal::allocator_rebind<Allocator, node>::type node_allocator_type;
0612     typedef tbb::internal::allocator_traits<node_allocator_type> node_allocator_traits;
0613     node_allocator_type my_allocator;
0614     HashCompare my_hash_compare;
0615 
0616     class node : public node_base {
0617         tbb::aligned_space<value_type> my_value;
0618     public:
0619         value_type* storage() { return my_value.begin(); }
0620         value_type& value() { return *storage(); }
0621     };
0622 
0623     void delete_node( node_base *n ) {
0624         node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
0625         node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
0626         node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
0627     }
0628 
0629     struct node_scoped_guard : tbb::internal::no_copy {
0630         node* my_node;
0631         node_allocator_type& my_alloc;
0632 
0633         node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
0634         ~node_scoped_guard() {
0635             if(my_node) {
0636                 node_allocator_traits::destroy(my_alloc, my_node);
0637                 node_allocator_traits::deallocate(my_alloc, my_node, 1);
0638             }
0639         }
0640         void dismiss() { my_node = NULL; }
0641     };
0642 
0643 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
0644     template<typename... Args>
0645     static node* create_node(node_allocator_type& allocator, Args&&... args)
0646 #else
0647     template<typename Arg1, typename Arg2>
0648     static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
0649 #endif
0650     {
0651         node* node_ptr = node_allocator_traits::allocate(allocator, 1);
0652         node_scoped_guard guard(node_ptr, allocator);
0653         node_allocator_traits::construct(allocator, node_ptr);
0654 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
0655         node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
0656 #else
0657         node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
0658 #endif
0659         guard.dismiss();
0660         return node_ptr;
0661     }
0662 
0663     static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
0664         return create_node(allocator, key, *t);
0665     }
0666 
0667 #if __TBB_CPP11_RVALUE_REF_PRESENT
0668     static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
0669         return create_node(allocator, key, std::move(*const_cast<T*>(t)));
0670     }
0671 #endif
0672 
0673     static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
0674 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
0675         // Emplace construct an empty T object inside the pair
0676         return create_node(allocator, std::piecewise_construct,
0677                            std::forward_as_tuple(key), std::forward_as_tuple());
0678 #else
0679         // Use of a temporary object is impossible, because create_node takes a non-const reference.
0680         // copy-initialization is possible because T is already required to be CopyConstructible.
0681         T obj = T();
0682         return create_node(allocator, key, tbb::internal::move(obj));
0683 #endif
0684     }
0685 
0686     static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
0687         __TBB_ASSERT(false,"this dummy function should not be called");
0688         return NULL;
0689     }
0690 
0691     node *search_bucket( const key_type &key, bucket *b ) const {
0692         node *n = static_cast<node*>( b->node_list );
0693         while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
0694             n = static_cast<node*>( n->next );
0695         __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
0696         return n;
0697     }
0698 
0699     //! Bucket accessor: finds a bucket, rehashes it if needed, acquires its lock, and provides access to it
0700     class bucket_accessor : public bucket::scoped_t {
0701         bucket *my_b;
0702     public:
0703         bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
0704         //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
0705         inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
0706             my_b = base->get_bucket( h );
0707             // TODO: actually, notification is unnecessary here, just hiding double-check
0708             if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
0709                 && try_acquire( my_b->mutex, /*write=*/true ) )
0710             {
0711                 if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
0712             }
0713             else bucket::scoped_t::acquire( my_b->mutex, writer );
0714             __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
0715         }
0716         //! check whether bucket is locked for write
0717         bool is_writer() { return bucket::scoped_t::is_writer; }
0718         //! get bucket pointer
0719         bucket *operator() () { return my_b; }
0720     };
0721 
0722     // TODO refactor to hash_base
0723     void rehash_bucket( bucket *b_new, const hashcode_t h ) {
0724         __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
0725         __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
0726         __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
0727         hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
0728 #if __TBB_STATISTICS
0729         my_info_rehashes++; // invocations of rehash_bucket
0730 #endif
0731 
0732         bucket_accessor b_old( this, h & mask );
0733 
0734         mask = (mask<<1) | 1; // get full mask for new bucket
0735         __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
0736     restart:
0737         for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
0738             hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
0739 #if TBB_USE_ASSERT
0740             hashcode_t bmask = h & (mask>>1);
0741             bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
0742             __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
0743 #endif
0744             if( (c & mask) == h ) {
0745                 if( !b_old.is_writer() )
0746                     if( !b_old.upgrade_to_writer() ) {
0747                         goto restart; // node ptr can be invalid due to concurrent erase
0748                     }
0749                 *p = n->next; // exclude from b_old
0750                 add_to_bucket( b_new, n );
0751             } else p = &n->next; // iterate to next item
0752         }
0753     }
0754 
0755     struct call_clear_on_leave {
0756         concurrent_hash_map* my_ch_map;
0757         call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
0758         void dismiss() {my_ch_map = 0;}
0759         ~call_clear_on_leave(){
0760             if (my_ch_map){
0761                 my_ch_map->clear();
0762             }
0763         }
0764     };
0765 public:
0766 
0767     class accessor;
0768     //! Combines data access, locking, and garbage collection.
0769     class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
0770         friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
0771         friend class accessor;
0772     public:
0773         //! Type of value
0774         typedef const typename concurrent_hash_map::value_type value_type;
0775 
0776         //! True if result is empty.
0777         bool empty() const { return !my_node; }
0778 
0779         //! Set to null
0780         void release() {
0781             if( my_node ) {
0782                 node::scoped_t::release();
0783                 my_node = 0;
0784             }
0785         }
0786 
0787         //! Return reference to associated value in hash table.
0788         const_reference operator*() const {
0789             __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
0790             return my_node->value();
0791         }
0792 
0793         //! Return pointer to associated value in hash table.
0794         const_pointer operator->() const {
0795             return &operator*();
0796         }
0797 
0798         //! Create empty result
0799         const_accessor() : my_node(NULL) {}
0800 
0801         //! Destroy result after releasing the underlying reference.
0802         ~const_accessor() {
0803             my_node = NULL; // scoped lock's release() is called in its destructor
0804         }
0805     protected:
0806         bool is_writer() { return node::scoped_t::is_writer; }
0807         node *my_node;
0808         hashcode_t my_hash;
0809     };
0810 
0811     //! Allows write access to elements and combines data access, locking, and garbage collection.
0812     class accessor: public const_accessor {
0813     public:
0814         //! Type of value
0815         typedef typename concurrent_hash_map::value_type value_type;
0816 
0817         //! Return reference to associated value in hash table.
0818         reference operator*() const {
0819             __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
0820             return this->my_node->value();
0821         }
0822 
0823         //! Return pointer to associated value in hash table.
0824         pointer operator->() const {
0825             return &operator*();
0826         }
0827     };
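    /* A minimal usage sketch for the accessor classes (illustrative only; the table
       type, variable and function names below are hypothetical, and <string> is
       assumed to be included):

           typedef tbb::concurrent_hash_map<std::string,int> counter_table;

           // Thread-safe increment: the accessor holds a write lock on the element.
           void increment( counter_table &table, const std::string &key ) {
               counter_table::accessor a;
               table.insert( a, key );   // inserts {key, 0} if the key is absent
               ++a->second;              // the update is protected by the element lock
           }                             // the lock is released when 'a' is destroyed

           // Thread-safe read: the const_accessor holds a read lock on the element.
           bool read_count( const counter_table &table, const std::string &key, int &out ) {
               counter_table::const_accessor ca;
               if( !table.find( ca, key ) ) return false;
               out = ca->second;
               return true;
           }
    */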
0828 
0829     //! Construct empty table.
0830     explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
0831         : internal::hash_map_base(), my_allocator(a)
0832     {}
0833 
0834     explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
0835         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
0836     {}
0837 
0838     //! Construct empty table with n preallocated buckets. This number also serves as the initial concurrency level.
0839     concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
0840         : internal::hash_map_base(), my_allocator(a)
0841     {
0842         reserve( n, my_allocator );
0843     }
0844 
0845     concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
0846         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
0847     {
0848         reserve( n, my_allocator );
0849     }
0850 
0851     //! Copy constructor
0852     concurrent_hash_map( const concurrent_hash_map &table )
0853         : internal::hash_map_base(),
0854           my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
0855     {
0856         call_clear_on_leave scope_guard(this);
0857         internal_copy(table);
0858         scope_guard.dismiss();
0859     }
0860 
0861     concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
0862         : internal::hash_map_base(), my_allocator(a)
0863     {
0864         call_clear_on_leave scope_guard(this);
0865         internal_copy(table);
0866         scope_guard.dismiss();
0867     }
0868 
0869 #if __TBB_CPP11_RVALUE_REF_PRESENT
0870     //! Move constructor
0871     concurrent_hash_map( concurrent_hash_map &&table )
0872         : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
0873     {
0874         internal_move(std::move(table));
0875     }
0876 
0877     //! Move constructor with a given allocator instance
0878     concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
0879         : internal::hash_map_base(), my_allocator(a)
0880     {
0881         if (a == table.get_allocator()){
0882             internal_move(std::move(table));
0883         }else{
0884             call_clear_on_leave scope_guard(this);
0885             internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
0886             scope_guard.dismiss();
0887         }
0888     }
0889 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
0890 
0891     //! Construct by copying an iteration range, with a given allocator instance
0892     template<typename I>
0893     concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
0894         : internal::hash_map_base(), my_allocator(a)
0895     {
0896         call_clear_on_leave scope_guard(this);
0897         internal_copy(first, last, std::distance(first, last));
0898         scope_guard.dismiss();
0899     }
0900 
0901     template<typename I>
0902     concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
0903         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
0904     {
0905         call_clear_on_leave scope_guard(this);
0906         internal_copy(first, last, std::distance(first, last));
0907         scope_guard.dismiss();
0908     }
0909 
0910 #if __TBB_INITIALIZER_LISTS_PRESENT
0911     //! Construct table from an initializer list of values
0912     concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
0913         : internal::hash_map_base(), my_allocator(a)
0914     {
0915         call_clear_on_leave scope_guard(this);
0916         internal_copy(il.begin(), il.end(), il.size());
0917         scope_guard.dismiss();
0918     }
0919 
0920     concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
0921         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
0922     {
0923         call_clear_on_leave scope_guard(this);
0924         internal_copy(il.begin(), il.end(), il.size());
0925         scope_guard.dismiss();
0926     }
0927 
0928 #endif //__TBB_INITIALIZER_LISTS_PRESENT
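    /* Construction sketches (illustrative only; assume a C++11 compiler, that <string>
       is included, and that the variable names are hypothetical):

           typedef tbb::concurrent_hash_map<int,std::string> table_t;

           table_t a;                                  // empty table
           table_t b( 1000 );                          // ~1000 buckets preallocated
           table_t c( { {1,"one"}, {2,"two"} } );      // from an initializer list
           table_t d( c );                             // copy construction
    */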
0929 
0930     //! Assignment
0931     concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
0932         if( this!=&table ) {
0933             typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
0934             clear();
0935             tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
0936             internal_copy(table);
0937         }
0938         return *this;
0939     }
0940 
0941 #if __TBB_CPP11_RVALUE_REF_PRESENT
0942     //! Move Assignment
0943     concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
0944         if(this != &table) {
0945             typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
0946             internal_move_assign(std::move(table), pocma_type());
0947         }
0948         return *this;
0949     }
0950 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
0951 
0952 #if __TBB_INITIALIZER_LISTS_PRESENT
0953     //! Assignment
0954     concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
0955         clear();
0956         internal_copy(il.begin(), il.end(), il.size());
0957         return *this;
0958     }
0959 #endif //__TBB_INITIALIZER_LISTS_PRESENT
0960 
0961 
0962     //! Rehashes and optionally resizes the whole table.
0963     /** Useful to optimize performance before or after concurrent operations.
0964         Also enables using of find() and count() concurrent methods in serial context. */
0965     void rehash(size_type n = 0);
0966 
0967     //! Clear table
0968     void clear();
0969 
0970     //! Clear table and destroy it.
0971     ~concurrent_hash_map() { clear(); }
0972 
0973     //------------------------------------------------------------------------
0974     // Parallel algorithm support
0975     //------------------------------------------------------------------------
0976     range_type range( size_type grainsize=1 ) {
0977         return range_type( *this, grainsize );
0978     }
0979     const_range_type range( size_type grainsize=1 ) const {
0980         return const_range_type( *this, grainsize );
0981     }
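    /* A usage sketch for range() with tbb::parallel_for (illustrative only; assumes a
       C++11 compiler, that "tbb/parallel_for.h" is included as well, and that the table
       is not modified concurrently while being traversed):

           typedef tbb::concurrent_hash_map<int,int> table_t;

           int sum_values( const table_t &table ) {
               tbb::atomic<int> total;
               total = 0;
               tbb::parallel_for( table.range(), [&total]( const table_t::const_range_type &r ) {
                   int local = 0;
                   for( table_t::const_range_type::iterator i = r.begin(); i != r.end(); ++i )
                       local += i->second;
                   total += local;       // atomic accumulation across subranges
               } );
               return total;
           }
    */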
0982 
0983     //------------------------------------------------------------------------
0984     // STL support - not thread-safe methods
0985     //------------------------------------------------------------------------
0986     iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
0987     iterator end() { return iterator( *this, 0, 0, 0 ); }
0988     const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
0989     const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
0990     std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
0991     std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
0992 
0993     //! Number of items in table.
0994     size_type size() const { return my_size; }
0995 
0996     //! True if size()==0.
0997     bool empty() const { return my_size == 0; }
0998 
0999     //! Upper bound on size.
1000     size_type max_size() const {return (~size_type(0))/sizeof(node);}
1001 
1002     //! Returns the current number of buckets
1003     size_type bucket_count() const { return my_mask+1; }
1004 
1005     //! return allocator object
1006     allocator_type get_allocator() const { return this->my_allocator; }
1007 
1008     //! swap two instances. Iterators are invalidated
1009     void swap( concurrent_hash_map &table );
1010 
1011     //------------------------------------------------------------------------
1012     // concurrent map operations
1013     //------------------------------------------------------------------------
1014 
1015     //! Return count of items (0 or 1)
1016     size_type count( const Key &key ) const {
1017         return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1018     }
1019 
1020     //! Find item and acquire a read lock on the item.
1021     /** Return true if item is found, false otherwise. */
1022     bool find( const_accessor &result, const Key &key ) const {
1023         result.release();
1024         return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1025     }
1026 
1027     //! Find item and acquire a write lock on the item.
1028     /** Return true if item is found, false otherwise. */
1029     bool find( accessor &result, const Key &key ) {
1030         result.release();
1031         return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1032     }
1033 
1034     //! Insert item (if not already present) and acquire a read lock on the item.
1035     /** Returns true if item is new. */
1036     bool insert( const_accessor &result, const Key &key ) {
1037         result.release();
1038         return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1039     }
1040 
1041     //! Insert item (if not already present) and acquire a write lock on the item.
1042     /** Returns true if item is new. */
1043     bool insert( accessor &result, const Key &key ) {
1044         result.release();
1045         return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1046     }
1047 
1048     //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
1049     /** Returns true if item is new. */
1050     bool insert( const_accessor &result, const value_type &value ) {
1051         result.release();
1052         return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1053     }
1054 
1055     //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
1056     /** Returns true if item is new. */
1057     bool insert( accessor &result, const value_type &value ) {
1058         result.release();
1059         return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1060     }
1061 
1062     //! Insert item by copying if there is no such key present already
1063     /** Returns true if item is inserted. */
1064     bool insert( const value_type &value ) {
1065         return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1066     }
1067 
1068 #if __TBB_CPP11_RVALUE_REF_PRESENT
1069     //! Insert item by moving if there is no such key present already and acquire a read lock on the item.
1070     /** Returns true if item is new. */
1071     bool insert( const_accessor &result, value_type && value ) {
1072         return generic_move_insert(result, std::move(value));
1073     }
1074 
1075     //! Insert item by moving if there is no such key present already and acquire a write lock on the item.
1076     /** Returns true if item is new. */
1077     bool insert( accessor &result, value_type && value ) {
1078         return generic_move_insert(result, std::move(value));
1079     }
1080 
1081     //! Insert item by moving if there is no such key present already
1082     /** Returns true if item is inserted. */
1083     bool insert( value_type && value ) {
1084         return generic_move_insert(accessor_not_used(), std::move(value));
1085     }
1086 
1087 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1088     //! Insert an item constructed in place if there is no such key present already and acquire a read lock on the item.
1089     /** Returns true if item is new. */
1090     template<typename... Args>
1091     bool emplace( const_accessor &result, Args&&... args ) {
1092         return generic_emplace(result, std::forward<Args>(args)...);
1093     }
1094 
1095     //! Insert an item constructed in place if there is no such key present already and acquire a write lock on the item.
1096     /** Returns true if item is new. */
1097     template<typename... Args>
1098     bool emplace( accessor &result, Args&&... args ) {
1099         return generic_emplace(result, std::forward<Args>(args)...);
1100     }
1101 
1102     //! Insert an item constructed in place if there is no such key present already
1103     /** Returns true if item is inserted. */
1104     template<typename... Args>
1105     bool emplace( Args&&... args ) {
1106         return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1107     }
1108 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1109 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
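    /* Emplace sketch (illustrative only; requires the C++11 support guarded above and
       assumes a tbb::concurrent_hash_map<int,std::string> instance named 'table'):

           tbb::concurrent_hash_map<int,std::string>::accessor a;
           table.emplace( a, 42, "forty two" );   // constructs the pair in place if absent
           a->second += "!";                      // element stays write-locked through 'a'
    */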
1110 
1111     //! Insert range [first, last)
1112     template<typename I>
1113     void insert( I first, I last ) {
1114         for ( ; first != last; ++first )
1115             insert( *first );
1116     }
1117 
1118 #if __TBB_INITIALIZER_LISTS_PRESENT
1119     //! Insert initializer list
1120     void insert( std::initializer_list<value_type> il ) {
1121         insert( il.begin(), il.end() );
1122     }
1123 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1124 
1125     //! Erase item.
1126     /** Return true if the item was erased by this particular call. */
1127     bool erase( const Key& key );
1128 
1129     //! Erase item by const_accessor.
1130     /** Return true if the item was erased by this particular call. */
1131     bool erase( const_accessor& item_accessor ) {
1132         return exclude( item_accessor );
1133     }
1134 
1135     //! Erase item by accessor.
1136     /** Return true if the item was erased by this particular call. */
1137     bool erase( accessor& item_accessor ) {
1138         return exclude( item_accessor );
1139     }
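    /* Erase sketch (illustrative only; assumes a tbb::concurrent_hash_map<int,std::string>
       instance named 'table'): an element can be erased either by key or through an
       accessor that already refers to it.

           table.erase( 42 );            // by key; returns true if something was erased
           tbb::concurrent_hash_map<int,std::string>::accessor a;
           if( table.find( a, 7 ) )
               table.erase( a );         // erase the element 'a' points to
    */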
1140 
1141 protected:
1142     //! Insert or find item and optionally acquire a lock on the item.
1143     bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write,  node* (*allocate_node)(node_allocator_type& ,  const Key &, const T * ), node *tmp_n = 0  ) ;
1144 
1145     struct accessor_not_used { void release(){}};
1146     friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1147     friend const_accessor* accessor_location( const_accessor & a )      { return &a;}
1148 
1149     friend bool is_write_access_needed( accessor const& )           { return true;}
1150     friend bool is_write_access_needed( const_accessor const& )     { return false;}
1151     friend bool is_write_access_needed( accessor_not_used const& )  { return false;}
1152 
1153 #if __TBB_CPP11_RVALUE_REF_PRESENT
1154     template<typename Accessor>
1155     bool generic_move_insert( Accessor && result, value_type && value ) {
1156         result.release();
1157         return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1158     }
1159 
1160 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1161     template<typename Accessor, typename... Args>
1162     bool generic_emplace( Accessor && result, Args &&... args ) {
1163         result.release();
1164         node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1165         return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1166     }
1167 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1168 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1169 
1170     //! delete item by accessor
1171     bool exclude( const_accessor &item_accessor );
1172 
1173     //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
1174     template<typename I>
1175     std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1176 
1177     //! Copy "source" to *this, where *this must start out empty.
1178     void internal_copy( const concurrent_hash_map& source );
1179 
1180     template<typename I>
1181     void internal_copy( I first, I last, size_type reserve_size );
1182 
1183 #if __TBB_CPP11_RVALUE_REF_PRESENT
1184     // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
1185     void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) {
1186         tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
1187         internal_move(std::move(other));
1188     }
1189 
1190     void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) {
1191         if (this->my_allocator == other.my_allocator) {
1192             internal_move(std::move(other));
1193         } else {
1194             //do per element move
1195             internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1196         }
1197     }
1198 #endif
1199 
1200     //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
1201     /** Return pointer to item with given key, or NULL if no such item exists.
1202         Must not be called concurrently with erasure operations. */
1203     const_pointer internal_fast_find( const Key& key ) const {
1204         hashcode_t h = my_hash_compare.hash( key );
1205         hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1206         node *n;
1207     restart:
1208         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1209         bucket *b = get_bucket( h & m );
1210         // TODO: actually, notification is unnecessary here, just hiding double-check
1211         if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
1212         {
1213             bucket::scoped_t lock;
1214             if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1215                 if( b->node_list == internal::rehash_req)
1216                     const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1217             }
1218             else lock.acquire( b->mutex, /*write=*/false );
1219             __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
1220         }
1221         n = search_bucket( key, b );
1222         if( n )
1223             return n->storage();
1224         else if( check_mask_race( h, m ) )
1225             goto restart;
1226         return 0;
1227     }
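    // Note on the probe above: my_mask is always of the form 2^k-1 (all low bits set),
    // which is exactly what the assertion (m & (m+1)) == 0 checks, e.g. m == 0x3F
    // passes while m == 0x40 does not.  If check_mask_race() detects that the table
    // grew while this probe was in flight, the search restarts against the refreshed
    // mask rather than reporting a stale miss, and a bucket still marked rehash_req
    // is rehashed on demand under its write lock.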
1228 };
1229 
1230 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1231 namespace internal {
1232 using namespace tbb::internal;
1233 
1234 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1235 using hash_map_t = Map<
1236     Key, T,
1237     std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1238                         pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1239     std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1240                          pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1241 >;
1242 }
1243 
1244 // Deduction guide for the constructor from two iterators and an optional hash_compare / allocator
1245 template<typename I, typename... Args>
1246 concurrent_hash_map(I, I, Args...)
1247 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1248 
1249 // Deduction guide for the constructor from an initializer_list and a hash_compare or allocator
1250 // (the guide for an initializer_list together with both a hash_compare and an allocator is implicit)
1251 template<typename Key, typename T, typename CompareOrAllocator>
1252 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1253 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1254 
1255 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
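// A minimal usage sketch for the deduction guides above (C++17 only; "v" is an
// assumed std::vector< std::pair<const int, std::string> >):
//
//     tbb::concurrent_hash_map m1( v.begin(), v.end() );
//         // deduced as concurrent_hash_map<int, std::string>
//     tbb::concurrent_hash_map m2( v.begin(), v.end(),
//                                  tbb::tbb_allocator<std::pair<const int, std::string> >() );
//         // the trailing argument is classified as an allocator by is_allocator_v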
1256 
1257 template<typename Key, typename T, typename HashCompare, typename A>
1258 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type&, const Key&, const T*), node *tmp_n ) {
1259     __TBB_ASSERT( !result || !result->my_node, NULL );
1260     bool return_value;
1261     hashcode_t const h = my_hash_compare.hash( key );
1262     hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1263     segment_index_t grow_segment = 0;
1264     node *n;
1265     restart:
1266     {//lock scope
1267         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1268         return_value = false;
1269         // get bucket
1270         bucket_accessor b( this, h & m );
1271 
1272         // find a node
1273         n = search_bucket( key, b() );
1274         if( op_insert ) {
1275             // [opt] insert a key
1276             if( !n ) {
1277                 if( !tmp_n ) {
1278                     tmp_n = allocate_node(my_allocator, key, t);
1279                 }
1280                 if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1281                     // Rerun search_bucket, in case another thread inserted the item during the upgrade.
1282                     n = search_bucket( key, b() );
1283                     if( is_valid(n) ) { // unfortunately, it did
1284                         b.downgrade_to_reader();
1285                         goto exists;
1286                     }
1287                 }
1288                 if( check_mask_race(h, m) )
1289                     goto restart; // b.release() is done in ~b().
1290                 // insert and set flag to grow the container
1291                 grow_segment = insert_new_node( b(), n = tmp_n, m );
1292                 tmp_n = 0;
1293                 return_value = true;
1294             }
1295         } else { // find or count
1296             if( !n ) {
1297                 if( check_mask_race( h, m ) )
1298                     goto restart; // b.release() is done in ~b(). TODO: replace by continue
1299                 return false;
1300             }
1301             return_value = true;
1302         }
1303     exists:
1304         if( !result ) goto check_growth;
1305         // TODO: the following looks like a generic/regular operation
1306         // acquire the item
1307         if( !result->try_acquire( n->mutex, write ) ) {
1308             for( tbb::internal::atomic_backoff backoff(true);; ) {
1309                 if( result->try_acquire( n->mutex, write ) ) break;
1310                 if( !backoff.bounded_pause() ) {
1311                     // the wait is taking too long; restart the operation
1312                     b.release();
1313                     __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1314                     __TBB_Yield();
1315                     m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1316                     goto restart;
1317                 }
1318             }
1319         }
1320     }//lock scope
1321     result->my_node = n;
1322     result->my_hash = h;
1323 check_growth:
1324     // [opt] grow the container
1325     if( grow_segment ) {
1326 #if __TBB_STATISTICS
1327         my_info_resizes++; // concurrent ones
1328 #endif
1329         enable_segment( grow_segment, my_allocator );
1330     }
1331     if( tmp_n ) // left over from the insert path when the key already existed
1332         delete_node( tmp_n );
1333     return return_value;
1334 }
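/* lookup() is the single work-horse behind the public find() and insert() overloads;
   a minimal sketch of how it is typically reached (assumes a concurrent_hash_map<int,int>
   named "table"):

       concurrent_hash_map<int,int>::accessor a;          // grants write access
       if( table.insert( a, 5 ) )                         // op_insert==true; t==NULL => default-constructed value
           a->second = 100;
       concurrent_hash_map<int,int>::const_accessor ca;   // grants read access
       if( table.find( ca, 5 ) )                          // op_insert==false
           std::printf( "%d\n", ca->second );

   The restart label handles the two races visible above: a concurrent table growth
   (check_mask_race) and a long-contended item lock, in which case the bucket is
   released and the whole operation retried so the bucket is not kept locked. */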
1335 
1336 template<typename Key, typename T, typename HashCompare, typename A>
1337 template<typename I>
1338 std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1339     hashcode_t h = my_hash_compare.hash( key );
1340     hashcode_t m = my_mask;
1341     __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1342     h &= m;
1343     bucket *b = get_bucket( h );
1344     while( b->node_list == internal::rehash_req ) {
1345         m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1346         b = get_bucket( h &= m );
1347     }
1348     node *n = search_bucket( key, b );
1349     if( !n )
1350         return std::make_pair(end_, end_);
1351     iterator lower(*this, h, b, n), upper(lower);
1352     return std::make_pair(lower, ++upper);
1353 }
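/* internal_equal_range backs the public equal_range(); because keys are unique the
   returned range contains at most one element (a minimal sketch, assuming a
   concurrent_hash_map<int,int> named "table"):

       typedef concurrent_hash_map<int,int>::iterator iter;
       std::pair<iter,iter> r = table.equal_range( 5 );
       for( iter it = r.first; it != r.second; ++it )
           ++it->second;                       // executes zero or one time

   Unlike lookup(), the code above reads my_mask once and never re-checks it, so this
   path is evidently not meant to run concurrently with table growth. */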
1354 
1355 template<typename Key, typename T, typename HashCompare, typename A>
1356 bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
1357     __TBB_ASSERT( item_accessor.my_node, NULL );
1358     node_base *const n = item_accessor.my_node;
1359     hashcode_t const h = item_accessor.my_hash;
1360     hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1361     do {
1362         // get bucket
1363         bucket_accessor b( this, h & m, /*writer=*/true );
1364         node_base **p = &b()->node_list;
1365         while( *p && *p != n )
1366             p = &(*p)->next;
1367         if( !*p ) { // someone else was first
1368             if( check_mask_race( h, m ) )
1369                 continue;
1370             item_accessor.release();
1371             return false;
1372         }
1373         __TBB_ASSERT( *p == n, NULL );
1374         *p = n->next; // remove from container
1375         my_size--;
1376         break;
1377     } while(true);
1378     if( !item_accessor.is_writer() ) // need to get exclusive lock
1379         item_accessor.upgrade_to_writer(); // return value means nothing here
1380     item_accessor.release();
1381     delete_node( n ); // Only one thread can delete it
1382     return true;
1383 }
1384 
1385 template<typename Key, typename T, typename HashCompare, typename A>
1386 bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
1387     node_base *n;
1388     hashcode_t const h = my_hash_compare.hash( key );
1389     hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1390 restart:
1391     {//lock scope
1392         // get bucket
1393         bucket_accessor b( this, h & m );
1394     search:
1395         node_base **p = &b()->node_list;
1396         n = *p;
1397         while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1398             p = &n->next;
1399             n = *p;
1400         }
1401         if( !n ) { // not found, but mask could be changed
1402             if( check_mask_race( h, m ) )
1403                 goto restart;
1404             return false;
1405         }
1406         else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1407             if( check_mask_race( h, m ) ) // contended upgrade, check mask
1408                 goto restart;
1409             goto search;
1410         }
1411         *p = n->next;
1412         my_size--;
1413     }
1414     {
1415         typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1416     }
1417     // note: no other thread should be attempting to acquire this mutex again; do not try to upgrade const_accessor!
1418     delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1419     return true;
1420 }
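/* Reader's note on the empty block above: constructing and immediately destroying a
   write lock on n->mutex makes the erasing thread wait until every accessor still
   holding the item has released it, and only then is the node deleted.  Typical use:

       bool removed = table.erase( 42 );   // true only for the call that actually removed the key
*/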
1421 
1422 template<typename Key, typename T, typename HashCompare, typename A>
1423 void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
1424     typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1425     if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1426         using std::swap;
1427         tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1428         swap(this->my_hash_compare, table.my_hash_compare);
1429         internal_swap(table);
1430     }
1431 }
1432 
1433 template<typename Key, typename T, typename HashCompare, typename A>
1434 void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
1435     reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1436     hashcode_t mask = my_mask;
1437     hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1438     __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1439     bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1440     for(; b <= mask; b++, bp++ ) {
1441         node_base *n = bp->node_list;
1442         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1443         __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1444         if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1445             hashcode_t h = b; bucket *b_old = bp;
1446             do {
1447                 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1448                 hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1449                 b_old = get_bucket( h &= m );
1450             } while( b_old->node_list == internal::rehash_req );
1451             // now h is the index of the root rehashed bucket b_old
1452             mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1453             for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1454                 hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1455                 if( (c & mask) != h ) { // should be rehashed
1456                     *p = q->next; // exclude from b_old
1457                     bucket *b_new = get_bucket( c & mask );
1458                     __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1459                     add_to_bucket( b_new, q );
1460                 } else p = &q->next; // iterate to next item
1461             }
1462         }
1463     }
1464 #if TBB_USE_PERFORMANCE_WARNINGS
1465     int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1466     static bool reported = false;
1467 #endif
1468 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1469     for( b = 0; b <= mask; b++ ) { // scan all buckets to check consistency and gather statistics
1470         if( b & (b-2) ) ++bp; // not the beginning of a segment
1471         else bp = get_bucket( b );
1472         node_base *n = bp->node_list;
1473         __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1474         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1475 #if TBB_USE_PERFORMANCE_WARNINGS
1476         if( n == internal::empty_rehashed ) empty_buckets++;
1477         else if( n->next ) overpopulated_buckets++;
1478 #endif
1479 #if TBB_USE_ASSERT
1480         for( ; is_valid(n); n = n->next ) {
1481             hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1482             __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1483         }
1484 #endif
1485     }
1486 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1487 #if TBB_USE_PERFORMANCE_WARNINGS
1488     if( buckets > current_size) empty_buckets -= buckets - current_size;
1489     else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1490     if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1491         tbb::internal::runtime_warning(
1492             "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
1493 #if __TBB_USE_OPTIONAL_RTTI
1494             typeid(*this).name(),
1495 #else
1496             "concurrent_hash_map",
1497 #endif
1498             current_size, empty_buckets, overpopulated_buckets );
1499         reported = true;
1500     }
1501 #endif
1502 }
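/* A minimal usage sketch (assuming a populated concurrent_hash_map named "table"):
   after a serial bulk-load phase one can call

       table.rehash();          // eagerly rehash buckets still marked rehash_req
       table.rehash( 1 << 20 ); // additionally grow the bucket array via reserve()

   so that subsequent concurrent lookups do not pay for on-demand rehashing. */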
1503 
1504 template<typename Key, typename T, typename HashCompare, typename A>
1505 void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
1506     hashcode_t m = my_mask;
1507     __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1508 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1509 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1510     int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1511     static bool reported = false;
1512 #endif
1513     bucket *bp = 0;
1514     // check consistency
1515     for( segment_index_t b = 0; b <= m; b++ ) {
1516         if( b & (b-2) ) ++bp; // not the beginning of a segment
1517         else bp = get_bucket( b );
1518         node_base *n = bp->node_list;
1519         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1520         __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1521 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1522         if( n == internal::empty_rehashed ) empty_buckets++;
1523         else if( n == internal::rehash_req ) buckets--;
1524         else if( n->next ) overpopulated_buckets++;
1525 #endif
1526 #if __TBB_EXTRA_DEBUG
1527         for(; is_valid(n); n = n->next ) {
1528             hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1529             h &= m;
1530             __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1531         }
1532 #endif
1533     }
1534 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1535 #if __TBB_STATISTICS
1536     printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1537         " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1538         current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1539         unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1540     my_info_resizes = 0; // concurrent ones
1541     my_info_restarts = 0; // race collisions
1542     my_info_rehashes = 0;  // invocations of rehash_bucket
1543 #endif
1544     if( buckets > current_size) empty_buckets -= buckets - current_size;
1545     else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1546     if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1547         tbb::internal::runtime_warning(
1548             "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
1549 #if __TBB_USE_OPTIONAL_RTTI
1550             typeid(*this).name(),
1551 #else
1552             "concurrent_hash_map",
1553 #endif
1554             current_size, empty_buckets, overpopulated_buckets );
1555         reported = true;
1556     }
1557 #endif
1558 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1559     my_size = 0;
1560     segment_index_t s = segment_index_of( m );
1561     __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1562     do {
1563         __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1564         segment_ptr_t buckets_ptr = my_table[s];
1565         size_type sz = segment_size( s ? s : 1 );
1566         for( segment_index_t i = 0; i < sz; i++ )
1567             for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1568                 buckets_ptr[i].node_list = n->next;
1569                 delete_node( n );
1570             }
1571         delete_segment(s, my_allocator);
1572     } while(s-- > 0);
1573     my_mask = embedded_buckets - 1;
1574 }
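/* clear() assumes quiescence: the assertion messages above explicitly treat a locked
   bucket as a "concurrent or unexpectedly terminated operation", so it should only be
   called when no other thread is touching the table, e.g. at the end of a parallel
   phase:

       table.clear();   // deletes every node and shrinks back to the embedded buckets
*/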
1575 
1576 template<typename Key, typename T, typename HashCompare, typename A>
1577 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
1578     hashcode_t mask = source.my_mask;
1579     if( my_mask == mask ) { // optimized version
1580         reserve( source.my_size, my_allocator ); // TODO: load_factor?
1581         bucket *dst = 0, *src = 0;
1582         bool rehash_required = false;
1583         for( hashcode_t k = 0; k <= mask; k++ ) {
1584             if( k & (k-2) ) ++dst, ++src; // not the beginning of a segment
1585             else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1586             __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1587             node *n = static_cast<node*>( src->node_list );
1588             if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1589                 rehash_required = true;
1590                 dst->node_list = internal::rehash_req;
1591             } else for(; n; n = static_cast<node*>( n->next ) ) {
1592                 node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1593                 add_to_bucket( dst, node_ptr);
1594                 ++my_size; // TODO: replace by non-atomic op
1595             }
1596         }
1597         if( rehash_required ) rehash();
1598     } else internal_copy( source.begin(), source.end(), source.my_size );
1599 }
1600 
1601 template<typename Key, typename T, typename HashCompare, typename A>
1602 template<typename I>
1603 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
1604     reserve( reserve_size, my_allocator ); // TODO: load_factor?
1605     hashcode_t m = my_mask;
1606     for(; first != last; ++first) {
1607         hashcode_t h = my_hash_compare.hash( (*first).first );
1608         bucket *b = get_bucket( h & m );
1609         __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1610         node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1611         add_to_bucket( b, node_ptr );
1612         ++my_size; // TODO: replace by non-atomic op
1613     }
1614 }
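/* The two internal_copy overloads back the copy constructor and the iterator-range
   constructor.  When the bucket masks match, the first overload copies bucket by
   bucket; otherwise it falls back to the iterator form.  A minimal sketch (assuming a
   populated concurrent_hash_map<int,int> named "table"):

       concurrent_hash_map<int,int> copy1( table );                       // copy constructor
       concurrent_hash_map<int,int> copy2( table.begin(), table.end() );  // range constructor
*/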
1615 
1616 } // namespace interface5
1617 
1618 using interface5::concurrent_hash_map;
1619 
1620 
1621 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1622 inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
1623     if(a.size() != b.size()) return false;
1624     typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
1625     typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
1626     for(; i != i_end; ++i) {
1627         j = b.equal_range(i->first).first;
1628         if( j == j_end || !(i->second == j->second) ) return false;
1629     }
1630     return true;
1631 }
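/* Equality is element-wise: the maps compare equal when their sizes match and every
   key of "a" maps to an equal value in "b".  The allocators may differ (A1 vs A2) but
   the HashCompare type must be the same.  For example:

       concurrent_hash_map<int,int> a, b;
       a.insert( std::make_pair(1, 10) );
       b.insert( std::make_pair(1, 10) );
       assert( a == b && !(a != b) );
*/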
1632 
1633 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1634 inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
1635 {    return !(a == b); }
1636 
1637 template<typename Key, typename T, typename HashCompare, typename A>
1638 inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
1639 {    a.swap( b ); }
1640 
1641 #if _MSC_VER && !defined(__INTEL_COMPILER)
1642     #pragma warning( pop )
1643 #endif // warning 4127 is back
1644 
1645 } // namespace tbb
1646 
1647 #include "internal/_warning_suppress_disable_notice.h"
1648 #undef __TBB_concurrent_hash_map_H_include_area
1649 
1650 #endif /* __TBB_concurrent_hash_map_H */