/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_detail__segment_table_H
#define __TBB_detail__segment_table_H

#include "_config.h"
#include "_allocator_traits.h"
#include "_template_helpers.h"
#include "_utils.h"
#include "_assert.h"
#include "_exception.h"
#include <atomic>
#include <type_traits>
#include <memory>
#include <cstring>

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(push)
#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
#endif

namespace tbb {
namespace detail {
namespace d1 {

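// CRTP base class for segmented storage: DerivedType is expected to provide the hooks invoked
// through self() and the traits referenced below, among them create_segment, destroy_segment,
// deallocate_segment, nullify_segment, copy_segment, move_segment, destroy_elements,
// allocate_long_table, and the compile-time constants allow_table_extending,
// is_noexcept_assignment and is_noexcept_swap.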
template <typename T, typename Allocator, typename DerivedType, std::size_t PointersPerEmbeddedTable>
class segment_table {
public:
    using value_type = T;
    using segment_type = T*;
    using atomic_segment = std::atomic<segment_type>;
    using segment_table_type = atomic_segment*;

    using size_type = std::size_t;
    using segment_index_type = std::size_t;

    using allocator_type = Allocator;

    using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
    using segment_table_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_segment>;
protected:
    using segment_table_allocator_traits = tbb::detail::allocator_traits<segment_table_allocator_type>;
    using derived_type = DerivedType;

    static constexpr size_type pointers_per_embedded_table = PointersPerEmbeddedTable;
    static constexpr size_type pointers_per_long_table = sizeof(size_type) * 8;
public:
    segment_table( const allocator_type& alloc = allocator_type() )
        : my_segment_table_allocator(alloc), my_segment_table(nullptr)
        , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
    {
        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        zero_table(my_embedded_table, pointers_per_embedded_table);
    }

    segment_table( const segment_table& other )
        : my_segment_table_allocator(segment_table_allocator_traits::
                                     select_on_container_copy_construction(other.my_segment_table_allocator))
        , my_segment_table(nullptr), my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
    {
        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        zero_table(my_embedded_table, pointers_per_embedded_table);
        try_call( [&] {
            internal_transfer(other, copy_segment_body_type{*this});
        } ).on_exception( [&] {
            clear();
        });
    }

    segment_table( const segment_table& other, const allocator_type& alloc )
        : my_segment_table_allocator(alloc), my_segment_table(nullptr)
        , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
    {
        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        zero_table(my_embedded_table, pointers_per_embedded_table);
        try_call( [&] {
            internal_transfer(other, copy_segment_body_type{*this});
        } ).on_exception( [&] {
            clear();
        });
    }

    segment_table( segment_table&& other )
        : my_segment_table_allocator(std::move(other.my_segment_table_allocator)), my_segment_table(nullptr)
        , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
    {
        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        zero_table(my_embedded_table, pointers_per_embedded_table);
        internal_move(std::move(other));
    }

    segment_table( segment_table&& other, const allocator_type& alloc )
        : my_segment_table_allocator(alloc), my_segment_table(nullptr), my_first_block{}
        , my_size{}, my_segment_table_allocation_failed{}
    {
        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        zero_table(my_embedded_table, pointers_per_embedded_table);
        using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
        internal_move_construct_with_allocator(std::move(other), alloc, is_equal_type());
    }

    ~segment_table() {
        clear();
    }

    segment_table& operator=( const segment_table& other ) {
        if (this != &other) {
            copy_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
            internal_transfer(other, copy_segment_body_type{*this});
        }
        return *this;
    }

    segment_table& operator=( segment_table&& other )
        noexcept(derived_type::is_noexcept_assignment)
    {
        using pocma_type = typename segment_table_allocator_traits::propagate_on_container_move_assignment;
        using is_equal_type = typename segment_table_allocator_traits::is_always_equal;

        if (this != &other) {
            move_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
            internal_move_assign(std::move(other), tbb::detail::disjunction<is_equal_type, pocma_type>());
        }
        return *this;
    }

    void swap( segment_table& other )
        noexcept(derived_type::is_noexcept_swap)
    {
        using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
        using pocs_type = typename segment_table_allocator_traits::propagate_on_container_swap;

        if (this != &other) {
            swap_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
            internal_swap(other, tbb::detail::disjunction<is_equal_type, pocs_type>());
        }
    }

    segment_type get_segment( segment_index_type index ) const {
        return get_table()[index] + segment_base(index);
    }

    value_type& operator[]( size_type index ) {
        return internal_subscript<true>(index);
    }

    const value_type& operator[]( size_type index ) const {
        return const_cast<segment_table*>(this)->internal_subscript<true>(index);
    }

    const segment_table_allocator_type& get_allocator() const {
        return my_segment_table_allocator;
    }

    segment_table_allocator_type& get_allocator() {
        return my_segment_table_allocator;
    }

    void enable_segment( segment_type& segment, segment_table_type table, segment_index_type seg_index, size_type index ) {
        // Allocate new segment
        segment_type new_segment = self()->create_segment(table, seg_index, index);
        if (new_segment != nullptr) {
            // Store (new_segment - segment_base) into the segment table to allow access to the table by index via
            // my_segment_table[segment_index_of(index)][index]
            segment_type disabled_segment = nullptr;
            if (!table[seg_index].compare_exchange_strong(disabled_segment, new_segment - segment_base(seg_index))) {
                // compare_exchange failed => some other thread has already enabled this segment
                // Deallocate the memory
                self()->deallocate_segment(new_segment, seg_index);
            }
        }

        segment = table[seg_index].load(std::memory_order_acquire);
        __TBB_ASSERT(segment != nullptr, "If create_segment returned nullptr, the element should be stored in the table");
    }

    void delete_segment( segment_index_type seg_index ) {
        segment_type segment_to_delete = self()->nullify_segment(get_table(), seg_index);
        if (segment_to_delete == segment_allocation_failure_tag) {
            return;
        }

        segment_to_delete += segment_base(seg_index);

        // Deallocate the segment
        self()->destroy_segment(segment_to_delete, seg_index);
    }

    size_type number_of_segments( segment_table_type table ) const {
        // If the active table is the embedded table, return the number of embedded segments;
        // otherwise, return the maximum number of segments
        return table == my_embedded_table ? pointers_per_embedded_table : pointers_per_long_table;
    }

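    // Note on the pointer encoding used below: a table slot holds nullptr (0) while the segment is
    // disabled, segment_allocation_failure_tag (1) if allocating it threw, and otherwise a real
    // (base-adjusted) segment pointer. Comparing a slot against segment_allocation_failure_tag is
    // therefore enough to tell an allocated segment from a disabled or failed one.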
    size_type capacity() const noexcept {
        segment_table_type table = get_table();
        size_type num_segments = number_of_segments(table);
        for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) {
            // Check if the pointer is valid (allocated)
            if (table[seg_index].load(std::memory_order_relaxed) <= segment_allocation_failure_tag) {
                return segment_base(seg_index);
            }
        }
        return segment_base(num_segments);
    }

    size_type find_last_allocated_segment( segment_table_type table ) const noexcept {
        size_type end = 0;
        size_type num_segments = number_of_segments(table);
        for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) {
            // Check if the pointer is valid (allocated)
            if (table[seg_index].load(std::memory_order_relaxed) > segment_allocation_failure_tag) {
                end = seg_index + 1;
            }
        }
        return end;
    }

    void reserve( size_type n ) {
        if (n > allocator_traits_type::max_size(my_segment_table_allocator)) {
            throw_exception(exception_id::reservation_length_error);
        }

        size_type size = my_size.load(std::memory_order_relaxed);
        segment_index_type start_seg_idx = size == 0 ? 0 : segment_index_of(size - 1) + 1;
        for (segment_index_type seg_idx = start_seg_idx; segment_base(seg_idx) < n; ++seg_idx) {
            size_type first_index = segment_base(seg_idx);
            internal_subscript<true>(first_index);
        }
    }

    void clear() {
        clear_segments();
        clear_table();
        my_size.store(0, std::memory_order_relaxed);
        my_first_block.store(0, std::memory_order_relaxed);
    }

    void clear_segments() {
        segment_table_type current_segment_table = get_table();
        for (size_type i = number_of_segments(current_segment_table); i != 0; --i) {
            if (current_segment_table[i - 1].load(std::memory_order_relaxed) != nullptr) {
                // If the segment was enabled - disable and deallocate it
                delete_segment(i - 1);
            }
        }
    }

    void clear_table() {
        segment_table_type current_segment_table = get_table();
        if (current_segment_table != my_embedded_table) {
            // If the active table is not the embedded one - deallocate the active table
            for (size_type i = 0; i != pointers_per_long_table; ++i) {
                segment_table_allocator_traits::destroy(my_segment_table_allocator, &current_segment_table[i]);
            }

            segment_table_allocator_traits::deallocate(my_segment_table_allocator, current_segment_table, pointers_per_long_table);
            my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
            zero_table(my_embedded_table, pointers_per_embedded_table);
        }
    }

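    // Descriptive note: a thread whose required range begins within the embedded table performs the
    // allocation of the long table itself; threads starting beyond the embedded range spin with
    // backoff until the new table is published, or throw bad_alloc if an allocation failure has
    // been reported.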
    void extend_table_if_necessary(segment_table_type& table, size_type start_index, size_type end_index) {
        // Extend the segment table if the active table is the embedded one
        // and the requested index does not fit into the embedded table
        if (table == my_embedded_table && end_index > embedded_table_size) {
            if (start_index <= embedded_table_size) {
                try_call([&] {
                    table = self()->allocate_long_table(my_embedded_table, start_index);
                    // It is possible that the table was extended by the thread that allocated first_block.
                    // In this case it is necessary to re-read the current table.

                    if (table) {
                        my_segment_table.store(table, std::memory_order_release);
                    } else {
                        table = my_segment_table.load(std::memory_order_acquire);
                    }
                }).on_exception([&] {
                    my_segment_table_allocation_failed.store(true, std::memory_order_relaxed);
                });
            } else {
                atomic_backoff backoff;
                do {
                    if (my_segment_table_allocation_failed.load(std::memory_order_relaxed)) {
                        throw_exception(exception_id::bad_alloc);
                    }
                    backoff.pause();
                    table = my_segment_table.load(std::memory_order_acquire);
                } while (table == my_embedded_table);
            }
        }
    }

    // Return the segment where index is stored
    static constexpr segment_index_type segment_index_of( size_type index ) {
        return size_type(tbb::detail::log2(uintptr_t(index|1)));
    }

    // Needed to calculate the offset in segment
    static constexpr size_type segment_base( size_type index ) {
        return size_type(1) << index & ~size_type(1);
    }

    // Return size of the segment
    static constexpr size_type segment_size( size_type index ) {
        return index == 0 ? 2 : size_type(1) << index;
    }
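
    // Worked example of the mapping above: indices 0 and 1 live in segment 0
    // (segment_base(0) == 0, segment_size(0) == 2); indices 2..3 live in segment 1; and for k >= 1
    // segment k holds indices [2^k, 2^(k+1)), so e.g. segment_index_of(5) == 2 with
    // segment_base(2) == 4 and segment_size(2) == 4.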

private:

    derived_type* self() {
        return static_cast<derived_type*>(this);
    }

    struct copy_segment_body_type {
        void operator()( segment_index_type index, segment_type from, segment_type to ) const {
            my_instance.self()->copy_segment(index, from, to);
        }
        segment_table& my_instance;
    };

    struct move_segment_body_type {
        void operator()( segment_index_type index, segment_type from, segment_type to ) const {
            my_instance.self()->move_segment(index, from, to);
        }
        segment_table& my_instance;
    };

    // Transfers all segments from the other table
    template <typename TransferBody>
    void internal_transfer( const segment_table& other, TransferBody transfer_segment ) {
        static_cast<derived_type*>(this)->destroy_elements();

        assign_first_block_if_necessary(other.my_first_block.load(std::memory_order_relaxed));
        my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);

        segment_table_type other_table = other.get_table();
        size_type end_segment_size = segment_size(other.find_last_allocated_segment(other_table));

        // If an exception occurred in other, then the size may be greater than the size of the end segment.
        size_type other_size = end_segment_size < other.my_size.load(std::memory_order_relaxed) ?
            other.my_size.load(std::memory_order_relaxed) : end_segment_size;
        other_size = my_segment_table_allocation_failed ? embedded_table_size : other_size;

        for (segment_index_type i = 0; segment_base(i) < other_size; ++i) {
            // If the segment in other table is enabled - transfer it
            if (other_table[i].load(std::memory_order_relaxed) == segment_allocation_failure_tag) {
                my_size = segment_base(i);
                break;
            } else if (other_table[i].load(std::memory_order_relaxed) != nullptr) {
                internal_subscript<true>(segment_base(i));
                transfer_segment(i, other.get_table()[i].load(std::memory_order_relaxed) + segment_base(i),
                                 get_table()[i].load(std::memory_order_relaxed) + segment_base(i));
            }
        }
    }

    // Moves the other segment table
    // Only equal allocators are allowed
    void internal_move( segment_table&& other ) {
        // NOTE: allocators should be equal
        clear();
        my_first_block.store(other.my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
        my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
        // If the active table in other is the embedded one - move all of the embedded segments
        if (other.get_table() == other.my_embedded_table) {
            for ( size_type i = 0; i != pointers_per_embedded_table; ++i ) {
                segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
                my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
                other.my_embedded_table[i].store(nullptr, std::memory_order_relaxed);
            }
            my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        } else {
            my_segment_table.store(other.my_segment_table, std::memory_order_relaxed);
            other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
            zero_table(other.my_embedded_table, pointers_per_embedded_table);
        }
        other.my_size.store(0, std::memory_order_relaxed);
    }

    // Move constructs the segment table with the given allocator object
    // when any instances of allocator_type are always equal
    void internal_move_construct_with_allocator( segment_table&& other, const allocator_type&,
                                                 /*is_always_equal = */ std::true_type ) {
        internal_move(std::move(other));
    }

    // Move constructs the segment table with the given allocator object
    // when instances of allocator_type are not always equal
    void internal_move_construct_with_allocator( segment_table&& other, const allocator_type& alloc,
                                                 /*is_always_equal = */ std::false_type ) {
        if (other.my_segment_table_allocator == alloc) {
            // If allocators are equal - restore pointers
            internal_move(std::move(other));
        } else {
            // If allocators are not equal - perform per element move with reallocation
            try_call( [&] {
                internal_transfer(other, move_segment_body_type{*this});
            } ).on_exception( [&] {
                clear();
            });
        }
    }

    // Move assigns other to this segment table when any instances of allocator_type are always equal
    // or propagate_on_container_move_assignment is true
    void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::true_type ) {
        internal_move(std::move(other));
    }

    // Move assigns other to this segment table when instances of allocator_type are not always equal
    // and propagate_on_container_move_assignment is false
    void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::false_type ) {
        if (my_segment_table_allocator == other.my_segment_table_allocator) {
            // If allocators are equal - restore pointers
            internal_move(std::move(other));
        } else {
            // If allocators are not equal - perform per element move with reallocation
            internal_transfer(other, move_segment_body_type{*this});
        }
    }

    // Swaps two segment tables when any instances of allocator_type are always equal
    // or propagate_on_container_swap is true
    void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::true_type ) {
        internal_swap_fields(other);
    }

    // Swaps two segment tables when instances of allocator_type are not always equal
    // and propagate_on_container_swap is false.
    // According to the C++ standard, swapping two containers with unequal allocators
    // is undefined behavior.
    void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::false_type ) {
        __TBB_ASSERT(my_segment_table_allocator == other.my_segment_table_allocator,
                     "Swapping with unequal allocators is not allowed");
        internal_swap_fields(other);
    }

    void internal_swap_fields( segment_table& other ) {
        // If the active table in either *this or other is the embedded one - swap the embedded tables
        if (get_table() == my_embedded_table ||
            other.get_table() == other.my_embedded_table) {

            for (size_type i = 0; i != pointers_per_embedded_table; ++i) {
                segment_type current_segment = my_embedded_table[i].load(std::memory_order_relaxed);
                segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);

                my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
                other.my_embedded_table[i].store(current_segment, std::memory_order_relaxed);
            }
        }

        segment_table_type current_segment_table = get_table();
        segment_table_type other_segment_table = other.get_table();

        // If the active table in *this is the embedded one - point other at its own embedded table
        // (its contents were already swapped above)
        if (current_segment_table == my_embedded_table) {
            other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
        } else {
            // Otherwise - hand the long table of *this over to other
            other.my_segment_table.store(current_segment_table, std::memory_order_relaxed);
        }

        // If the active table in other is the embedded one - point *this at its own embedded table
        // (its contents were already swapped above)
        if (other_segment_table == other.my_embedded_table) {
            my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
        } else {
            // Otherwise - take ownership of other's long table
            my_segment_table.store(other_segment_table, std::memory_order_relaxed);
        }
        auto first_block = other.my_first_block.load(std::memory_order_relaxed);
        other.my_first_block.store(my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
        my_first_block.store(first_block, std::memory_order_relaxed);

        auto size = other.my_size.load(std::memory_order_relaxed);
        other.my_size.store(my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
        my_size.store(size, std::memory_order_relaxed);
    }

protected:
    // A tag stored into the table when an exception was thrown during segment allocation
    const segment_type segment_allocation_failure_tag = reinterpret_cast<segment_type>(1);
    static constexpr size_type embedded_table_size = segment_size(pointers_per_embedded_table);

    template <bool allow_out_of_range_access>
    value_type& internal_subscript( size_type index ) {
        segment_index_type seg_index = segment_index_of(index);
        segment_table_type table = my_segment_table.load(std::memory_order_acquire);
        segment_type segment = nullptr;

        if (allow_out_of_range_access) {
            if (derived_type::allow_table_extending) {
                extend_table_if_necessary(table, index, index + 1);
            }

            segment = table[seg_index].load(std::memory_order_acquire);
            // If the required segment is disabled - enable it
            if (segment == nullptr) {
                enable_segment(segment, table, seg_index, index);
            }
            // Check if an exception was thrown during segment allocation
            if (segment == segment_allocation_failure_tag) {
                throw_exception(exception_id::bad_alloc);
            }
        } else {
            segment = table[seg_index].load(std::memory_order_acquire);
        }
        __TBB_ASSERT(segment != nullptr, nullptr);

        return segment[index];
    }

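    // Records the first-block size exactly once: only the first caller's CAS from zero succeeds,
    // later calls leave my_first_block unchanged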
    void assign_first_block_if_necessary(segment_index_type index) {
        size_type zero = 0;
        if (this->my_first_block.load(std::memory_order_relaxed) == zero) {
            this->my_first_block.compare_exchange_strong(zero, index);
        }
    }

    void zero_table( segment_table_type table, size_type count ) {
        for (size_type i = 0; i != count; ++i) {
            table[i].store(nullptr, std::memory_order_relaxed);
        }
    }

    segment_table_type get_table() const {
        return my_segment_table.load(std::memory_order_acquire);
    }

    segment_table_allocator_type my_segment_table_allocator;
    // Pointer to the active table: either my_embedded_table or a dynamically allocated long table
    std::atomic<segment_table_type> my_segment_table;
    // Built-in table with pointers_per_embedded_table segment slots
    atomic_segment my_embedded_table[pointers_per_embedded_table];
    // Number of segments in the first block
    std::atomic<size_type> my_first_block;
    // Number of elements in the table
    std::atomic<size_type> my_size;
    // Flag indicating that extending the table failed
    std::atomic<bool> my_segment_table_allocation_failed;
}; // class segment_table

} // namespace d1
} // namespace detail
} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(pop) // warning 4127 is back
#endif

#endif // __TBB_detail__segment_table_H