#ifndef __TBB_enumerable_thread_specific_H
#define __TBB_enumerable_thread_specific_H

#define __TBB_enumerable_thread_specific_H_include_area
#include "internal/_warning_suppress_enable_notice.h"

#include "atomic.h"
#include "concurrent_vector.h"
#include "tbb_thread.h"
#include "tbb_allocator.h"
#include "cache_aligned_allocator.h"
#include "aligned_space.h"
#include "internal/_template_helpers.h"
#include "internal/_tbb_hash_compare_impl.h"
#include "tbb_profiling.h"
#include <string.h> // for memcpy

#if __TBB_PREVIEW_RESUMABLE_TASKS
#include "task.h" // for task::suspend_point
#endif

#if _WIN32||_WIN64
#include "machine/windows_api.h"
#else
#include <pthread.h>
#endif

#define __TBB_ETS_USE_CPP11 \
    (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
    && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT)

namespace tbb {

enum ets_key_usage_type {
    ets_key_per_instance
    , ets_no_key
#if __TBB_PREVIEW_RESUMABLE_TASKS
    , ets_suspend_aware
#endif
};
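
// Usage sketch (illustrative only; relies solely on declarations from this header).
// The third template parameter of enumerable_thread_specific selects how a thread
// finds its element: the default ets_no_key goes through the hash table in
// internal::ets_base, while ets_key_per_instance trades one native TLS key per
// container for a faster local() lookup, e.g.
//
//     tbb::enumerable_thread_specific< int,
//                                      tbb::cache_aligned_allocator<int>,
//                                      tbb::ets_key_per_instance > per_thread_counter;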

namespace interface6 {

template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
class enumerable_thread_specific;

namespace internal {

using namespace tbb::internal;

template <ets_key_usage_type ETS_key_type>
struct ets_key_selector {
    typedef tbb_thread::id key_type;
    static key_type current_key() {
        return tbb::internal::thread_get_id_v3();
    }
};

#if __TBB_PREVIEW_RESUMABLE_TASKS
template <>
struct ets_key_selector<ets_suspend_aware> {
    typedef task::suspend_point key_type;
    static key_type current_key() {
        return internal_current_suspend_point();
    }
};

inline task::suspend_point atomic_compare_and_swap(task::suspend_point& location,
        const task::suspend_point& value, const task::suspend_point& comparand) {
    return as_atomic(location).compare_and_swap(value, comparand);
}
#endif

template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
    typedef typename ets_key_selector<ETS_key_type>::key_type key_type;
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
public:
#endif
    struct slot;

    struct array {
        array* next;
        size_t lg_size;
        slot& at( size_t k ) {
            return ((slot*)(void*)(this+1))[k];
        }
        size_t size() const {return size_t(1)<<lg_size;}
        size_t mask() const {return size()-1;}
        size_t start( size_t h ) const {
            return h>>(8*sizeof(size_t)-lg_size);
        }
    };
    struct slot {
        key_type key;
        void* ptr;
        bool empty() const {return key == key_type();}
        bool match( key_type k ) const {return key == k;}
        bool claim( key_type k ) {
            return atomic_compare_and_swap(key, k, key_type()) == key_type();
        }
    };
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
protected:
#endif

    atomic<array*> my_root;
    atomic<size_t> my_count;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;
    virtual void free_array(void* ptr, size_t _size) = 0;
    array* allocate( size_t lg_size ) {
        size_t n = size_t(1)<<lg_size;
        array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
        a->lg_size = lg_size;
        std::memset( a+1, 0, n*sizeof(slot) );
        return a;
    }
    void free(array* a) {
        size_t n = size_t(1)<<(a->lg_size);
        free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
    }

    ets_base() {my_root=NULL; my_count=0;}
    virtual ~ets_base();
    void* table_lookup( bool& exists );
    void table_clear();

    template <ets_key_usage_type E2>
    void table_elementwise_copy( const ets_base& other,
            void*(*add_element)(ets_base<E2>&, void*) ) {
        __TBB_ASSERT(!my_root,NULL);
        __TBB_ASSERT(!my_count,NULL);
        if( !other.my_root ) return;
        array* root = my_root = allocate(other.my_root->lg_size);
        root->next = NULL;
        my_count = other.my_count;
        size_t mask = root->mask();
        for( array* r=other.my_root; r; r=r->next ) {
            for( size_t i=0; i<r->size(); ++i ) {
                slot& s1 = r->at(i);
                if( !s1.empty() ) {
                    for( size_t j = root->start(tbb::tbb_hash<key_type>()(s1.key)); ; j=(j+1)&mask ) {
                        slot& s2 = root->at(j);
                        if( s2.empty() ) {
                            s2.ptr = add_element(static_cast<ets_base<E2>&>(*this), s1.ptr);
                            s2.key = s1.key;
                            break;
                        }
                        else if( s2.match(s1.key) )
                            break;
                    }
                }
            }
        }
    }
    void table_swap( ets_base& other ) {
        __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
        tbb::internal::swap<relaxed>(my_root, other.my_root);
        tbb::internal::swap<relaxed>(my_count, other.my_count);
    }
};

template<ets_key_usage_type ETS_key_type>
ets_base<ETS_key_type>::~ets_base() {
    __TBB_ASSERT(!my_root, NULL);
}

template<ets_key_usage_type ETS_key_type>
void ets_base<ETS_key_type>::table_clear() {
    while( array* r = my_root ) {
        my_root = r->next;
        free(r);
    }
    my_count = 0;
}

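// table_lookup() implements a small open-addressed hash table with linear probing.
// A miss constructs the caller's element via create_local(), publishes a larger
// root array with a single compare_and_swap once the table becomes more than half
// full, and then claims an empty slot in the current root; superseded arrays stay
// linked through 'next' so concurrent readers remain valid until table_clear().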
template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
    const key_type k = ets_key_selector<ETS_key_type>::current_key();

    __TBB_ASSERT(k != key_type(),NULL);
    void* found;
    size_t h = tbb::tbb_hash<key_type>()(k);
    for( array* r=my_root; r; r=r->next ) {
        call_itt_notify(acquired,r);
        size_t mask=r->mask();
        for(size_t i = r->start(h); ;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() ) break;
            if( s.match(k) ) {
                if( r==my_root ) {
                    exists = true;
                    return s.ptr;
                } else {
                    exists = true;
                    found = s.ptr;
                    goto insert;
                }
            }
        }
    }

    exists = false;
    found = create_local();
    {
        size_t c = ++my_count;
        array* r = my_root;
        call_itt_notify(acquired,r);
        if( !r || c>r->size()/2 ) {
            size_t s = r ? r->lg_size : 2;
            while( c>size_t(1)<<(s-1) ) ++s;
            array* a = allocate(s);
            for(;;) {
                a->next = r;
                call_itt_notify(releasing,a);
                array* new_r = my_root.compare_and_swap(a,r);
                if( new_r==r ) break;
                call_itt_notify(acquired, new_r);
                if( new_r->lg_size>=s ) {
                    free(a);
                    break;
                }
                r = new_r;
            }
        }
    }
insert:
    array* ir = my_root;
    call_itt_notify(acquired, ir);
    size_t mask = ir->mask();
    for(size_t i = ir->start(h);;i=(i+1)&mask) {
        slot& s = ir->at(i);
        if( s.empty() ) {
            if( s.claim(k) ) {
                s.ptr = found;
                return found;
            }
        }
    }
}

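// The ets_key_per_instance specialization caches the element pointer in one native
// TLS slot per container (FlsAlloc/TlsAlloc on Windows, pthread_key_create elsewhere)
// and falls back to the generic hash table only on a thread's first access.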
template <>
class ets_base<ets_key_per_instance>: public ets_base<ets_no_key> {
    typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
#if __TBB_WIN8UI_SUPPORT
    typedef DWORD tls_key_t;
    void create_key() { my_key = FlsAlloc(NULL); }
    void destroy_key() { FlsFree(my_key); }
    void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)FlsGetValue(my_key); }
#else
    typedef DWORD tls_key_t;
    void create_key() { my_key = TlsAlloc(); }
    void destroy_key() { TlsFree(my_key); }
    void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)TlsGetValue(my_key); }
#endif
#else
    typedef pthread_key_t tls_key_t;
    void create_key() { pthread_key_create(&my_key, NULL); }
    void destroy_key() { pthread_key_delete(my_key); }
    void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
    void* get_tls() const { return pthread_getspecific(my_key); }
#endif
    tls_key_t my_key;
    virtual void* create_local() __TBB_override = 0;
    virtual void* create_array(size_t _size) __TBB_override = 0;
    virtual void free_array(void* ptr, size_t _size) __TBB_override = 0;
protected:
    ets_base() {create_key();}
    ~ets_base() {destroy_key();}
    void* table_lookup( bool& exists ) {
        void* found = get_tls();
        if( found ) {
            exists=true;
        } else {
            found = super::table_lookup(exists);
            set_tls(found);
        }
        return found;
    }
    void table_clear() {
        destroy_key();
        create_key();
        super::table_clear();
    }
    void table_swap( ets_base& other ) {
        using std::swap;
        __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
        swap(my_key, other.my_key);
        super::table_swap(other);
    }
};
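// Random-access iterator over the concurrent_vector of thread-local elements; it
// caches a pointer to the current value and invalidates that cache whenever the
// index changes.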
template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif
{
    Container *my_container;
    typename Container::size_type my_index;
    mutable Value *my_value;

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T>
    operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                            const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                           const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
                                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

public:

    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
        my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator(*my_container, my_index + offset);
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index-offset );
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset;
        my_value = NULL;
        return *this;
    }

    Value& operator*() const {
        Value* value = my_value;
        if( !value ) {
            value = my_value = (*my_container)[my_index].value();
        }
        __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" );
        return *value;
    }

    Value& operator[]( ptrdiff_t k ) const {
        return *(*my_container)[my_index + k].value();
    }

    Value* operator->() const {return &operator*();}

    enumerable_thread_specific_iterator& operator++() {
        ++my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator result = *this;
        ++my_index;
        my_value = NULL;
        return result;
    }

    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator result = *this;
        --my_index;
        my_value = NULL;
        return result;
    }

    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};

template<typename Container, typename T>
enumerable_thread_specific_iterator<Container,T>
operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
    return enumerable_thread_specific_iterator<Container,T>( *v.my_container, v.my_index + offset );
}

template<typename Container, typename T, typename U>
bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index==j.my_index && i.my_container == j.my_container;
}

template<typename Container, typename T, typename U>
bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i==j);
}

template<typename Container, typename T, typename U>
bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index<j.my_index;
}

template<typename Container, typename T, typename U>
bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return j<i;
}

template<typename Container, typename T, typename U>
bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i<j);
}

template<typename Container, typename T, typename U>
bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(j<i);
}

template<typename Container, typename T, typename U>
ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
                     const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index-j.my_index;
}

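// segmented_iterator walks a container of containers as a single flat input
// sequence; flattened2d further below builds its begin()/end() from it.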
template<typename SegmentedContainer, typename Value >
class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::input_iterator_tag, Value>
#endif
{
    template<typename C, typename T, typename U>
    friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename T, typename U>
    friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename U>
    friend class segmented_iterator;

public:

    segmented_iterator() {my_segcont = NULL;}

    segmented_iterator( const SegmentedContainer& _segmented_container ) :
        my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
        outer_iter(my_segcont->end()) { }

    ~segmented_iterator() {}

    typedef typename SegmentedContainer::iterator outer_iterator;
    typedef typename SegmentedContainer::value_type InnerContainer;
    typedef typename InnerContainer::iterator inner_iterator;

    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef typename SegmentedContainer::size_type size_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::input_iterator_tag iterator_category;

    template<typename U>
    segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
        my_segcont(other.my_segcont),
        outer_iter(other.outer_iter),
        inner_iter(other.inner_iter)
    {}

    template<typename U>
    segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
        if(this != &other) {
            my_segcont = other.my_segcont;
            outer_iter = other.outer_iter;
            if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
        }
        return *this;
    }

    segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
            if( !outer_iter->empty() ) {
                inner_iter = outer_iter->begin();
                break;
            }
        }
        return *this;
    }

    segmented_iterator& operator++() {
        advance_me();
        return *this;
    }

    segmented_iterator operator++(int) {
        segmented_iterator tmp = *this;
        operator++();
        return tmp;
    }

    bool operator==(const outer_iterator& other_outer) const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        return (outer_iter == other_outer &&
                (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
    }

    bool operator!=(const outer_iterator& other_outer) const {
        return !operator==(other_outer);
    }

    reference operator*() const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL);
        return *inner_iter;
    }

    pointer operator->() const { return &operator*();}

private:
    SegmentedContainer* my_segcont;
    outer_iterator outer_iter;
    inner_iterator inner_iter;

    void advance_me() {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), NULL);
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL);
        ++inner_iter;
        while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
            inner_iter = outer_iter->begin();
        }
    }
};

template<typename SegmentedContainer, typename T, typename U>
bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    if(i.my_segcont != j.my_segcont) return false;
    if(i.my_segcont == NULL) return true;
    if(i.outer_iter != j.outer_iter) return false;
    if(i.outer_iter == i.my_segcont->end()) return true;
    return i.inner_iter == j.inner_iter;
}

template<typename SegmentedContainer, typename T, typename U>
bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    return !(i==j);
}

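// The construct_by_* functors capture the different ways a thread-local T can be
// initialized (default, exemplar copy, functor call, argument pack); callback_leaf
// below type-erases whichever one the container was constructed with.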
template<typename T>
struct construct_by_default: tbb::internal::no_assign {
    void construct(void*where) {new(where) T();}
    construct_by_default( int ) {}
};

template<typename T>
struct construct_by_exemplar: tbb::internal::no_assign {
    const T exemplar;
    void construct(void*where) {new(where) T(exemplar);}
    construct_by_exemplar( const T& t ) : exemplar(t) {}
#if __TBB_ETS_USE_CPP11
    construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {}
#endif
};

template<typename T, typename Finit>
struct construct_by_finit: tbb::internal::no_assign {
    Finit f;
    void construct(void* where) {new(where) T(f());}
    construct_by_finit( const Finit& f_ ) : f(f_) {}
#if __TBB_ETS_USE_CPP11
    construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {}
#endif
};

#if __TBB_ETS_USE_CPP11
template<typename T, typename... P>
struct construct_by_args: tbb::internal::no_assign {
    internal::stored_pack<P...> pack;
    void construct(void* where) {
        internal::call( [where](const typename strip<P>::type&... args ){
            new(where) T(args...);
        }, pack );
    }
    construct_by_args( P&& ... args ) : pack(std::forward<P>(args)...) {}
};
#endif

template<typename T>
class callback_base {
public:
    virtual callback_base* clone() const = 0;
    virtual void destroy() = 0;
    virtual ~callback_base() { }
    virtual void construct(void* where) = 0;
};

template <typename T, typename Constructor>
class callback_leaf: public callback_base<T>, Constructor {
#if __TBB_ETS_USE_CPP11
    template<typename... P> callback_leaf( P&& ... params ) : Constructor(std::forward<P>(params)...) {}
#else
    template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
#endif

    typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

    callback_base<T>* clone() const __TBB_override {
        return make(*this);
    }

    void destroy() __TBB_override {
        my_allocator_type().destroy(this);
        my_allocator_type().deallocate(this,1);
    }

    void construct(void* where) __TBB_override {
        Constructor::construct(where);
    }
public:
#if __TBB_ETS_USE_CPP11
    template<typename... P>
    static callback_base<T>* make( P&& ... params ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf( std::forward<P>(params)... );
    }
#else
    template<typename X>
    static callback_base<T>* make( const X& x ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(x);
    }
#endif
};

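// ets_element keeps raw aligned storage plus an is_built flag, so an element can
// exist in the concurrent_vector before its T has been constructed; value_committed()
// records a successful construction and the destructor destroys T only if that
// flag is set.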
template<typename U>
struct ets_element {
    tbb::aligned_space<U> my_space;
    bool is_built;
    ets_element() { is_built = false; }
    U* value() { return my_space.begin(); }
    U* value_committed() { is_built = true; return my_space.begin(); }
    ~ets_element() {
        if(is_built) {
            my_space.begin()->~U();
            is_built = false;
        }
    }
};

template<typename T, typename ETS> struct is_compatible_ets { static const bool value = false; };
template<typename T, typename U, typename A, ets_key_usage_type C>
struct is_compatible_ets< T, enumerable_thread_specific<U,A,C> > { static const bool value = internal::is_same_type<T,U>::value; };

#if __TBB_ETS_USE_CPP11

template <typename T>
class is_callable_no_args {
private:
    typedef char yes[1];
    typedef char no [2];

    template<typename U> static yes& decide( decltype(declval<U>()())* );
    template<typename U> static no& decide(...);
public:
    static const bool value = (sizeof(decide<T>(NULL)) == sizeof(yes));
};
#endif

} // namespace internal
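// enumerable_thread_specific<T> gives each thread that touches it, via local(), its
// own lazily constructed element of type T, and lets you enumerate or combine all
// of those elements afterwards.
//
// Usage sketch (illustrative only; assumes tbb/parallel_for.h and tbb/blocked_range.h
// from the rest of the library):
//
//     #include <cstddef>
//     #include <functional>
//     #include <vector>
//     #include "tbb/blocked_range.h"
//     #include "tbb/enumerable_thread_specific.h"
//     #include "tbb/parallel_for.h"
//
//     int parallel_sum( const std::vector<int>& data ) {
//         tbb::enumerable_thread_specific<int> partial(0);  // exemplar constructor
//         tbb::parallel_for( tbb::blocked_range<std::size_t>(0, data.size()),
//             [&]( const tbb::blocked_range<std::size_t>& r ) {
//                 int& local = partial.local();              // created on first use per thread
//                 for( std::size_t i = r.begin(); i != r.end(); ++i )
//                     local += data[i];
//             } );
//         return partial.combine( std::plus<int>() );        // merge the per-thread sums
//     }
//
// combine_each() is the alternative when merging has side effects instead of a result.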
template <typename T,
          typename Allocator=cache_aligned_allocator<T>,
          ets_key_usage_type ETS_key_type=ets_no_key >
class enumerable_thread_specific: internal::ets_base<ETS_key_type> {

    template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;

    typedef internal::padded< internal::ets_element<T> > padded_element;

    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };

    typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
    typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;

    internal::callback_base<T> *my_construct_callback;

    internal_collection_type my_locals;

    void* create_local() __TBB_override {
        padded_element& lref = *my_locals.grow_by(1);
        my_construct_callback->construct(lref.value());
        return lref.value_committed();
    }

    static void* create_local_by_copy( internal::ets_base<ETS_key_type>& base, void* p ) {
        enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
        padded_element& lref = *ets.my_locals.grow_by(1);
        new(lref.value()) T(*static_cast<T*>(p));
        return lref.value_committed();
    }

#if __TBB_ETS_USE_CPP11
    static void* create_local_by_move( internal::ets_base<ETS_key_type>& base, void* p ) {
        enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
        padded_element& lref = *ets.my_locals.grow_by(1);
        new(lref.value()) T(std::move(*static_cast<T*>(p)));
        return lref.value_committed();
    }
#endif

    typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

    void* create_array(size_t _size) __TBB_override {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        return array_allocator_type().allocate(nelements);
    }

    void free_array( void* _ptr, size_t _size) __TBB_override {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
    }

public:

    typedef Allocator allocator_type;
    typedef T value_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef typename internal_collection_type::size_type size_type;
    typedef typename internal_collection_type::difference_type difference_type;

    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

    typedef generic_range_type< iterator > range_type;
    typedef generic_range_type< const_iterator > const_range_type;

    enumerable_thread_specific() : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_default<T> >::make(0)
    ){}

    template <typename Finit
#if __TBB_ETS_USE_CPP11
        , typename = typename internal::enable_if<internal::is_callable_no_args<typename internal::strip<Finit>::type>::value>::type
#endif
    >
    explicit enumerable_thread_specific( Finit finit ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( tbb::internal::move(finit) )
    ){}

    explicit enumerable_thread_specific( const T& exemplar ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar )
    ){}

#if __TBB_ETS_USE_CPP11
    explicit enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( std::move(exemplar) )
    ){}

    template <typename P1, typename... P,
        typename = typename internal::enable_if<!internal::is_callable_no_args<typename internal::strip<P1>::type>::value
            && !internal::is_compatible_ets<T, typename internal::strip<P1>::type>::value
            && !internal::is_same_type<T, typename internal::strip<P1>::type>::value
        >::type>
    enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
        internal::callback_leaf<T,internal::construct_by_args<T,P1,P...> >::make( std::forward<P1>(arg1), std::forward<P>(args)... )
    ){}
#endif

    ~enumerable_thread_specific() {
        if(my_construct_callback) my_construct_callback->destroy();
        this->internal::ets_base<ETS_key_type>::table_clear();
    }

    reference local() {
        bool exists;
        return local(exists);
    }

    reference local(bool& exists) {
        void* ptr = this->table_lookup(exists);
        return *(T*)ptr;
    }

    size_type size() const { return my_locals.size(); }

    bool empty() const { return my_locals.empty(); }

    iterator begin() { return iterator( my_locals, 0 ); }

    iterator end() { return iterator(my_locals, my_locals.size() ); }

    const_iterator begin() const { return const_iterator(my_locals, 0); }

    const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

    range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }

    const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }

    void clear() {
        my_locals.clear();
        this->table_clear();
    }

private:

    template<typename A2, ets_key_usage_type C2>
    void internal_copy(const enumerable_thread_specific<T, A2, C2>& other) {
#if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT
        __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
#endif
        my_construct_callback = other.my_construct_callback->clone();
        __TBB_ASSERT(my_locals.size()==0,NULL);
        my_locals.reserve(other.size());
        this->table_elementwise_copy( other, create_local_by_copy );
    }

    void internal_swap(enumerable_thread_specific& other) {
        using std::swap;
        __TBB_ASSERT( this!=&other, NULL );
        swap(my_construct_callback, other.my_construct_callback);
        swap(my_locals, other.my_locals);
        this->internal::ets_base<ETS_key_type>::table_swap(other);
    }

#if __TBB_ETS_USE_CPP11
    template<typename A2, ets_key_usage_type C2>
    void internal_move(enumerable_thread_specific<T, A2, C2>&& other) {
#if TBB_USE_ASSERT
        __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
#endif
        my_construct_callback = other.my_construct_callback;
        other.my_construct_callback = NULL;
        __TBB_ASSERT(my_locals.size()==0,NULL);
        my_locals.reserve(other.size());
        this->table_elementwise_copy( other, create_local_by_move );
    }
#endif

public:

    enumerable_thread_specific( const enumerable_thread_specific& other )
        : internal::ets_base<ETS_key_type>()
    {
        internal_copy(other);
    }

    template<typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
    {
        internal_copy(other);
    }

#if __TBB_ETS_USE_CPP11
    enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback()
    {
        internal_swap(other);
    }

    template<typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific( enumerable_thread_specific<T, Alloc, Cachetype>&& other ) : my_construct_callback()
    {
        internal_move(std::move(other));
    }
#endif

    enumerable_thread_specific& operator=( const enumerable_thread_specific& other )
    {
        if( this != &other ) {
            this->clear();
            my_construct_callback->destroy();
            internal_copy( other );
        }
        return *this;
    }

    template<typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific& operator=( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
    {
        __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL );
        this->clear();
        my_construct_callback->destroy();
        internal_copy(other);
        return *this;
    }

#if __TBB_ETS_USE_CPP11
    enumerable_thread_specific& operator=( enumerable_thread_specific&& other )
    {
        if( this != &other )
            internal_swap(other);
        return *this;
    }

    template<typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific& operator=( enumerable_thread_specific<T, Alloc, Cachetype>&& other )
    {
        __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL );
        this->clear();
        my_construct_callback->destroy();
        internal_move(std::move(other));
        return *this;
    }
#endif

    template <typename combine_func_t>
    T combine(combine_func_t f_combine) {
        if(begin() == end()) {
            internal::ets_element<T> location;
            my_construct_callback->construct(location.value());
            return *location.value_committed();
        }
        const_iterator ci = begin();
        T my_result = *ci;
        while(++ci != end())
            my_result = f_combine( my_result, *ci );
        return my_result;
    }

    template <typename combine_func_t>
    void combine_each(combine_func_t f_combine) {
        for(iterator ci = begin(); ci != end(); ++ci) {
            f_combine( *ci );
        }
    }

};
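// flattened2d adapts a container of containers (typically an
// enumerable_thread_specific of std::vector-like values) into one flat sequence.
//
// Usage sketch (illustrative only):
//
//     typedef tbb::enumerable_thread_specific< std::vector<int> > ets_vectors;
//     ets_vectors per_thread_hits;
//     // ... each worker appends to per_thread_hits.local() ...
//     tbb::flattened2d<ets_vectors> all_hits = tbb::flatten2d(per_thread_hits);
//     long total = 0;
//     for( tbb::flattened2d<ets_vectors>::iterator it = all_hits.begin();
//          it != all_hits.end(); ++it )
//         total += *it;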
template< typename Container >
class flattened2d {

    typedef typename Container::value_type conval_type;

public:

    typedef typename conval_type::size_type size_type;
    typedef typename conval_type::difference_type difference_type;
    typedef typename conval_type::allocator_type allocator_type;
    typedef typename conval_type::value_type value_type;
    typedef typename conval_type::reference reference;
    typedef typename conval_type::const_reference const_reference;
    typedef typename conval_type::pointer pointer;
    typedef typename conval_type::const_pointer const_pointer;

    typedef typename internal::segmented_iterator<Container, value_type> iterator;
    typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;

    flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
        my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }

    explicit flattened2d( const Container &c ) :
        my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }

    iterator begin() { return iterator(*my_container) = my_begin; }
    iterator end() { return iterator(*my_container) = my_end; }
    const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
    const_iterator end() const { return const_iterator(*my_container) = my_end; }

    size_type size() const {
        size_type tot_size = 0;
        for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
            tot_size += i->size();
        }
        return tot_size;
    }

private:

    Container *my_container;
    typename Container::const_iterator my_begin;
    typename Container::const_iterator my_end;

};

template <typename Container>
flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
    return flattened2d<Container>(c, b, e);
}

template <typename Container>
flattened2d<Container> flatten2d(const Container &c) {
    return flattened2d<Container>(c);
}

} // namespace interface6

namespace internal {
using interface6::internal::segmented_iterator;
}

using interface6::enumerable_thread_specific;
using interface6::flattened2d;
using interface6::flatten2d;

} // namespace tbb

#include "internal/_warning_suppress_disable_notice.h"
#undef __TBB_enumerable_thread_specific_H_include_area

#endif