#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#define __TBB_concurrent_hash_map_H_include_area
#include "internal/_warning_suppress_enable_notice.h"

#include "tbb_stddef.h"
#include <iterator>
#include <utility> // Need std::pair
#include <cstring> // Need std::memset
#include __TBB_STD_SWAP_HEADER

#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
#include "aligned_space.h"
#include "internal/_tbb_hash_compare_impl.h"
#include "internal/_template_helpers.h"
#include "internal/_allocator_traits.h"
#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo>
#endif
#if __TBB_STATISTICS
#include <stdio.h>
#endif
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
#include <tuple>
#endif

namespace tbb {

namespace interface5 {

template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
class concurrent_hash_map;

namespace internal {
using namespace tbb::internal;

typedef size_t hashcode_t;

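//! Node base type: an intrusive singly-linked list node protected by a spin_rw_mutex.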
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for the mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in the chain
    hash_map_node_base *next;
    mutex_t mutex;
};

//! Incompleteness flag value: a bucket whose node_list equals rehash_req still needs rehashing.
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));

//! Rehashed empty bucket flag: node_list value of an empty, already rehashed bucket.
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));

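//! Base class of concurrent_hash_map.
/** Owns the lazily allocated segment table of buckets, the current bucket mask
    (bucket count - 1), and the element count. */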
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        //! Mutex type for buckets
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for the mutex
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of segments embedded directly in the table object
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! Count of segments in the first allocation block
    static size_type const first_block = 8;
    //! Size of a pointer in bits: one segment slot per bit
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8;
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = number of buckets - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table; segments are allocated on demand
    segments_table_t my_table;
    //! Count of elements in the container
    atomic<size_type> my_size;
    //! Zero segment, embedded into the table object itself
    bucket my_embedded_segment[embedded_buckets];
#if __TBB_STATISTICS
    atomic<unsigned> my_info_resizes; // concurrent table growths
    mutable atomic<unsigned> my_info_restarts; // operation restarts due to mask races
    atomic<unsigned> my_info_rehashes; // bucket rehash operations
#endif

    //! Constructor
    hash_map_base() {
        std::memset(my_table, 0, sizeof(my_table));
        my_mask = 0;
        my_size = 0;
        std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
        for( size_type i = 0; i < embedded_block; i++ ) // fill the embedded part of the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1;
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
        my_info_resizes = 0;
        my_info_restarts = 0;
        my_info_rehashes = 0;
#endif
    }

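    // Buckets are grouped into power-of-two segments: segment 0 holds bucket indices {0,1},
    // and segment k (k >= 1) holds indices [2^k, 2^(k+1)). For example, bucket index 5 belongs
    // to segment __TBB_Log2(5|1) == 2 and sits at offset 5 - segment_base(2) == 1 within it.
    //! @return segment index of a given bucket index in the table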
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first bucket index of a given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size except for @arg k == 0
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k;
    }

    //! @return true if @arg ptr is a valid node pointer rather than a flag value
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
    }

    //! Initialize the buckets of a newly enabled segment
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) {
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req;
        }
    }

    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n;
    }

    //! Exception-safety helper: resets the published segment pointer unless dismissed
    struct enable_segment_failsafe : tbb::internal::no_copy {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };

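    //! Enable segment k: allocate its buckets and publish the enlarged mask with a release store.
    /** For k >= first_block a single segment is allocated; for k == embedded_block one block
        covering segments [embedded_block, first_block) is allocated at once. */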
    template<typename Allocator>
    void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
        typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
        typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
        bucket_allocator_type bucket_allocator(allocator);
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) {
            sz = segment_size( k );
            segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
            init_buckets( ptr, sz, is_initial );
            itt_hide_store_word( my_table[k], ptr );
            sz <<= 1;
        } else {
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block);
            for(segment_index_t i = embedded_block; i < first_block; i++)
                itt_hide_store_word( my_table[i], ptr + segment_base(i) );
        }
        itt_store_word_with_release( my_mask, sz-1 );
        watchdog.my_segment_ptr = 0;
    }

    template<typename Allocator>
    void delete_segment(segment_index_t s, const Allocator& allocator) {
        typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
        typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
        bucket_allocator_type bucket_allocator(allocator);
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );

        if( s >= first_block)
            bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
        else if( s == embedded_block && embedded_block != first_block )
            bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
                                                segment_size(first_block) - embedded_buckets);
        if( s >= embedded_block ) my_table[s] = 0;
    }

    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    //! Mark the bucket with the given low bits as rehashed in all allocated higher segments
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
            }
    }

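    //! Check for a mask race, i.e. the table was grown by another thread.
    /** Reloads my_mask; if it changed, decides whether the caller must restart the operation
        with the new mask (returns true) or can safely continue (returns false). */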
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
        m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }

    //! Process a mask race: check whether a rehashing collision requires the operation to restart
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // the hashcode maps to a different bucket under the new mask
            // find the next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 )
                ;
            m_old = (m_old<<1) - 1; // expand the bit into a full mask
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // if the bucket under the old mask is already rehashed, the item may have moved: restart
            if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
            {
#if __TBB_STATISTICS
                my_info_restarts++; // operation restarts due to a mask race
#endif
                return true;
            }
        }
        return false;
    }

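    //! Insert a node into bucket b and check for table growth.
    /** Returns the index of a new segment that the caller must allocate if the element count
        reached the current mask and no other thread is already growing the table; otherwise 0. */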
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size;
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) {
            segment_index_t new_seg = __TBB_Log2( mask+1 ); // optimized segment_index_of
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
            static const segment_ptr_t is_allocating = (segment_ptr_t)2;
            if( !itt_hide_load_word(my_table[new_seg])
              && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
                return new_seg; // the caller must allocate this segment
        }
        return 0;
    }

    //! Prepare enough segments for the given number of buckets
    template<typename Allocator>
    void reserve(size_type buckets, const Allocator& allocator) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), allocator, is_initial );
    }

    //! Swap hash_map_base fields
    void internal_swap(hash_map_base &table) {
        using std::swap;
        swap(this->my_mask, table.my_mask);
        swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            swap(this->my_table[i], table.my_table[i]);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move hash_map_base fields, leaving the source empty
    void internal_move(hash_map_base&& other) {
        my_mask = other.my_mask;
        other.my_mask = embedded_buckets - 1;
        my_size = other.my_size;
        other.my_size = 0;

        for(size_type i = 0; i < embedded_buckets; ++i) {
            my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
            other.my_embedded_segment[i].node_list = NULL;
        }

        for(size_type i = embedded_block; i < pointers_per_table; ++i) {
            my_table[i] = other.my_table[i];
            other.my_table[i] = NULL;
        }
    }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT
};

template<typename Iterator>
class hash_map_range;

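//! Meets requirements of a forward iterator for STL.
/** Value is either the T or const T type of the container. Iteration visits buckets in index order. */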
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() {
        size_t k = my_index+1;
        __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
        while( k <= my_map->my_mask ) {
            if( k&(k-2) ) // k is not the beginning of a segment: the next bucket is adjacent
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface5::concurrent_hash_map;
#else
public:
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table's array of buckets for the current item.
    size_t my_index;

    //! Pointer to the bucket that holds the current item.
    const bucket *my_bucket;

    //! Pointer to the node that holds the current item.
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

public:
    //! Construct undefined iterator
    hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}

    hash_map_iterator& operator=( const hash_map_iterator<Container,typename Container::value_type> &other ) {
        my_map = other.my_map;
        my_index = other.my_index;
        my_bucket = other.my_bucket;
        my_node = other.my_node;
        return *this;
    }
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->value();
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment
    hash_map_iterator operator++(int) {
        hash_map_iterator old(*this);
        operator++();
        return old;
    }
};

template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}

template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}

template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

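//! Range class used with concurrent_hash_map.
/** Models the TBB Range concept: splittable at an approximate midpoint bucket, so the map
    can be traversed in parallel by algorithms such as tbb::parallel_for. */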
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
        __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
        set_midpoint();
        r.set_midpoint();
    }
    //! Type conversion
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of buckets
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}

} // namespace internal

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif
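//! Unordered map from Key to T with concurrent, thread-safe insertion, lookup, and erasure.
/** Elements are reached through accessor (write lock on the element) or const_accessor
    (read lock); the element stays locked for the accessor's lifetime.

    Illustrative usage sketch (the key/value types below are examples, not part of this header):

        tbb::concurrent_hash_map<int, std::string> table;
        {
            tbb::concurrent_hash_map<int, std::string>::accessor a;
            table.insert( a, 1 );   // true if the key was not present before
            a->second = "one";      // element is write-locked while 'a' is alive
        }                           // lock released when 'a' goes out of scope
        {
            tbb::concurrent_hash_map<int, std::string>::const_accessor ca;
            if( table.find( ca, 1 ) ) {
                // ca->second can be read here under a reader lock
            }
        }
*/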
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    class node;
    typedef typename tbb::internal::allocator_rebind<Allocator, node>::type node_allocator_type;
    typedef tbb::internal::allocator_traits<node_allocator_type> node_allocator_traits;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    class node : public node_base {
        tbb::aligned_space<value_type> my_value;
    public:
        value_type* storage() { return my_value.begin(); }
        value_type& value() { return *storage(); }
    };

    void delete_node( node_base *n ) {
        node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
        node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
        node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
    }

    struct node_scoped_guard : tbb::internal::no_copy {
        node* my_node;
        node_allocator_type& my_alloc;

        node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
        ~node_scoped_guard() {
            if(my_node) {
                node_allocator_traits::destroy(my_alloc, my_node);
                node_allocator_traits::deallocate(my_alloc, my_node, 1);
            }
        }
        void dismiss() { my_node = NULL; }
    };

#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename... Args>
    static node* create_node(node_allocator_type& allocator, Args&&... args)
#else
    template<typename Arg1, typename Arg2>
    static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
#endif
    {
        node* node_ptr = node_allocator_traits::allocate(allocator, 1);
        node_scoped_guard guard(node_ptr, allocator);
        node_allocator_traits::construct(allocator, node_ptr);
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
        node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
#else
        node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
#endif
        guard.dismiss();
        return node_ptr;
    }

    static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return create_node(allocator, key, *t);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return create_node(allocator, key, std::move(*const_cast<T*>(t)));
    }
#endif

    static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
        // Emplace the value-initialized mapped part directly into the node.
        return create_node(allocator, std::piecewise_construct,
                           std::forward_as_tuple(key), std::forward_as_tuple());
#else
        // Pre-C++11 path: construct the mapped value separately and move/copy it into the node.
        T obj = T();
        return create_node(allocator, key, tbb::internal::move(obj));
#endif
    }

    static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
        __TBB_ASSERT(false,"this dummy function should not be called");
        return NULL;
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

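    //! Bucket accessor: locks a bucket for read or write, triggering a lazy rehash of the bucket if required.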
    class bucket_accessor : public bucket::scoped_t {
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! Acquire the bucket for hashcode h, rehashing it first if it is still marked rehash_req
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // If the bucket needs rehashing and the write lock is immediately available, rehash it here.
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h );
            }
            else bucket::scoped_t::acquire( my_b->mutex, writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! Check whether the bucket is locked for write
        bool is_writer() { return bucket::scoped_t::is_writer; }
        //! Get bucket pointer
        bucket *operator() () { return my_b; }
    };

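    //! Rehash bucket b_new: pull in the nodes from its parent bucket whose hash selects b_new under the new mask.
    /** b_new must be write-locked by the caller; the parent bucket index is h with its topmost bit cleared. */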
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
#if __TBB_STATISTICS
        my_info_rehashes++; // invocations of rehash_bucket
#endif

        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for the new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) { // the node belongs to the new bucket
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node pointer can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to the next node
        }
    }

    struct call_clear_on_leave {
        concurrent_hash_map* my_ch_map;
        call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
        void dismiss() {my_ch_map = 0;}
        ~call_clear_on_leave(){
            if (my_ch_map){
                my_ch_map->clear();
            }
        }
    };
public:

    class accessor;
    //! Provides read-only access to an element; holds a reader lock for its lifetime.
    class const_accessor : private node::scoped_t {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if the accessor is not pointing to an element
        bool empty() const { return !my_node; }

        //! Release the element lock and detach the accessor
        void release() {
            if( my_node ) {
                node::scoped_t::release();
                my_node = 0;
            }
        }

        //! Return reference to the associated element
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->value();
        }

        //! Return pointer to the associated element
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create an empty accessor
        const_accessor() : my_node(NULL) {}

        //! Destroy the accessor; the base scoped lock releases any held lock
        ~const_accessor() {
            my_node = NULL;
        }
    protected:
        bool is_writer() { return node::scoped_t::is_writer; }
        node *my_node;
        hashcode_t my_hash;
    };

    //! Provides read-write access to an element; holds a writer lock for its lifetime.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to the associated element
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->value();
        }

        //! Return pointer to the associated element
        pointer operator->() const {
            return &operator*();
        }
    };

    //! Construct an empty table.
    explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {}

    explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
    {}

    //! Construct an empty table with n preallocated buckets.
    concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        reserve( n, my_allocator );
    }

    concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
    {
        reserve( n, my_allocator );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map &table )
        : internal::hash_map_base(),
          my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(table);
        scope_guard.dismiss();
    }

    concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
        : internal::hash_map_base(), my_allocator(a)
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(table);
        scope_guard.dismiss();
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor
    concurrent_hash_map( concurrent_hash_map &&table )
        : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
    {
        internal_move(std::move(table));
    }

    //! Move constructor with a given allocator; falls back to element-wise move if allocators differ
    concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
        : internal::hash_map_base(), my_allocator(a)
    {
        if (a == table.get_allocator()){
            internal_move(std::move(table));
        }else{
            call_clear_on_leave scope_guard(this);
            internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
            scope_guard.dismiss();
        }
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(first, last, std::distance(first, last));
        scope_guard.dismiss();
    }

    template<typename I>
    concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(first, last, std::distance(first, last));
        scope_guard.dismiss();
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Construct table from an initializer list
    concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(il.begin(), il.end(), il.size());
        scope_guard.dismiss();
    }

    concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
    {
        call_clear_on_leave scope_guard(this);
        internal_copy(il.begin(), il.end(), il.size());
        scope_guard.dismiss();
    }

#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
        if( this!=&table ) {
            typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
            clear();
            tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
            internal_copy(table);
        }
        return *this;
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move assignment
    concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
        if(this != &table) {
            typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
            internal_move_assign(std::move(table), pocma_type());
        }
        return *this;
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment from an initializer list
    concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
        clear();
        internal_copy(il.begin(), il.end(), il.size());
        return *this;
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations.
        Not safe to call concurrently with other operations on the table. */
    void rehash(size_type n = 0);

    //! Clear the table; not safe to call concurrently with other operations on the table.
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    iterator end() { return iterator( *this, 0, 0, 0 ); }
    const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! Return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! Swap two concurrent_hash_map instances
    void swap( concurrent_hash_map &table );

    //------------------------------------------------------------------------
    // Concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(false, key, NULL, NULL, false, &do_not_allocate_node );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(false, key, NULL, &result, false, &do_not_allocate_node );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(false, key, NULL, &result, true, &do_not_allocate_node );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(true, key, NULL, &result, false, &allocate_node_default_construct );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(true, key, NULL, &result, true, &allocate_node_default_construct );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(true, value.first, &value.second, &result, false, &allocate_node_copy_construct );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(true, value.first, &value.second, &result, true, &allocate_node_copy_construct );
    }

    //! Insert item by copying if there is no such key present already.
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(true, value.first, &value.second, NULL, false, &allocate_node_copy_construct );
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Insert item by moving if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, value_type && value ) {
        return generic_move_insert(result, std::move(value));
    }

    //! Insert item by moving if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, value_type && value ) {
        return generic_move_insert(result, std::move(value));
    }

    //! Insert item by moving if there is no such key present already.
    /** Returns true if item is inserted. */
    bool insert( value_type && value ) {
        return generic_move_insert(accessor_not_used(), std::move(value));
    }

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    //! Insert item by constructing it in place if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    template<typename... Args>
    bool emplace( const_accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item by constructing it in place if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    template<typename... Args>
    bool emplace( accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item by constructing it in place if there is no such key present already.
    /** Returns true if item is inserted. */
    template<typename... Args>
    bool emplace( Args&&... args ) {
        return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Insert range [first, last)
    template<typename I>
    void insert( I first, I last ) {
        for ( ; first != last; ++first )
            insert( *first );
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Insert initializer list
    void insert( std::initializer_list<value_type> il ) {
        insert( il.begin(), il.end() );
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Erase item.
    /** Return true if the item was erased by this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if the item was erased by this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    //! Erase item by accessor.
    /** Return true if the item was erased by this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }

protected:
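    //! Central routine shared by find, count, insert, and emplace.
    /** Searches the bucket for the key; when op_insert is true and the key is absent, a node is
        created with allocate_node (or the pre-built tmp_n is consumed). If result is non-NULL the
        found or inserted element is locked for read or write before returning. Returns true when
        an element was inserted (op_insert) or found (lookup). */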
    bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;

    struct accessor_not_used { void release(){}};
    friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
    friend const_accessor* accessor_location( const_accessor & a )      { return &a;}

    friend bool is_write_access_needed( accessor const& )          { return true;}
    friend bool is_write_access_needed( const_accessor const& )    { return false;}
    friend bool is_write_access_needed( accessor_not_used const& ) { return false;}

#if __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename Accessor>
    bool generic_move_insert( Accessor && result, value_type && value ) {
        result.release();
        return lookup(true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
    }

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename Accessor, typename... Args>
    bool generic_emplace( Accessor && result, Args &&... args ) {
        result.release();
        node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
        return lookup(true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Delete item by accessor
    bool exclude( const_accessor &item_accessor );

    //! Returns the range of items equal to the key (at most one item)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy( I first, I last, size_type reserve_size );

#if __TBB_CPP11_RVALUE_REF_PRESENT
    // Move assignment when the allocator propagates on container move assignment.
    void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) {
        tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
        internal_move(std::move(other));
    }

    // Move assignment when the allocator does not propagate: move buckets only if allocators compare equal.
    void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) {
        if (this->my_allocator == other.my_allocator) {
            internal_move(std::move(other));
        } else {
            // Memory cannot be moved between different allocators; move the elements instead.
            internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
        }
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

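    //! Fast find when no concurrent erasure is used and no element lock is needed.
    /** Returns a pointer to the found value or NULL. Only waits on a bucket to complete a pending
        rehash; the result may be invalidated by a concurrent erasure. */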
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        bucket *b = get_bucket( h & m );
        // The bucket may still need rehashing; try to rehash it or wait for the rehashing thread.
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m );
            }
            else lock.acquire( b->mutex, false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return n->storage();
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};

#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
namespace internal {
using namespace tbb::internal;

// Select HashCompare and Allocator types for deduction guides: the first extra argument is the
// hash-compare unless it is an allocator, and the last extra argument is the allocator if it is one.
template<template<typename...> typename Map, typename Key, typename T, typename... Args>
using hash_map_t = Map<
    Key, T,
    std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
                        pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
    std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
                        pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
>;
} // namespace internal

// Deduction guide for the constructor from two iterators and hash-compare/allocator arguments.
template<typename I, typename... Args>
concurrent_hash_map(I, I, Args...)
-> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;

// Deduction guide for the constructor from an initializer_list and a hash-compare or allocator argument.
template<typename Key, typename T, typename CompareOrAllocator>
concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
-> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;

#endif //__TBB_CPP17_DEDUCTION_GUIDES_PRESENT

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n;
restart:
    {// lock scope
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        return_value = false;
        // get and lock the bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // insert the key if it is not present
            if( !n ) {
                if( !tmp_n ) {
                    tmp_n = allocate_node(my_allocator, key, t);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun the search in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b is released by its destructor
                // insert and set the flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b is released by its destructor
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->try_acquire( n->mutex, write ) ) {
            for( tbb::internal::atomic_backoff backoff(true);; ) {
                if( result->try_acquire( n->mutex, write ) ) break;
                if( !backoff.bounded_pause() ) {
                    // the wait takes too long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            }
        }
    }// lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // grow the container if needed
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent table growths
#endif
        enable_segment( grow_segment, my_allocator );
    }
    if( tmp_n ) // the node was preallocated but not inserted
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get and lock the owning bucket for write
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // the node was not found in the bucket
            if( check_mask_race( h, m ) )
                continue; // the mask changed: restart with the new mask
            item_accessor.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from the bucket's list
        my_size--;
        break;
    } while(true);
    if( !item_accessor.is_writer() ) // need an exclusive lock before destroying the node
        item_accessor.upgrade_to_writer(); // return value means nothing here
    item_accessor.release();
    delete_node( n ); // only one thread can own the node at this moment
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {// lock scope
        // get and lock the owning bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask may have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check the mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: no other thread can acquire this mutex again since the node is already unlinked
    delete_node( n );
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
    if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
        using std::swap;
        tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
        swap(this->my_hash_compare, table.my_hash_compare);
        internal_swap(table);
    }
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz, my_allocator );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
    bucket *bp = get_bucket( b ); // only the last segment needs scanning for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // the bucket still needs rehashing
            hashcode_t h = b; bucket *b_old = bp;
            do { // walk up to the closest already rehashed parent bucket
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all its non-rehashed children as rehashed
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
                if( (c & mask) != h ) { // should be moved
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to the next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // check consistency
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0;
    my_info_restarts = 0;
    my_info_rehashes = 0;
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    do { // walk segments from the last to the first, deleting nodes and deallocating buckets
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        delete_segment(s, my_allocator);
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version: copy the source bucket by bucket
        reserve( source.my_size, my_allocator );
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source bucket was not rehashed, copy the flag
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
                add_to_bucket( dst, node_ptr);
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end(), source.my_size );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
    reserve( reserve_size, my_allocator );
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( (*first).first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
        add_to_bucket( b, node_ptr );
        ++my_size;
    }
}

} // namespace interface5

using interface5::concurrent_hash_map;

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{ return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{ a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop )
#endif

} // namespace tbb

#include "internal/_warning_suppress_disable_notice.h"
#undef __TBB_concurrent_hash_map_H_include_area

#endif /* __TBB_concurrent_hash_map_H */