//  Copyright (C) 2008-2013 Tim Blechmann
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED

#include <boost/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#    pragma once
#endif


#include <boost/assert.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/core/span.hpp>
#include <boost/parameter/optional.hpp>
#include <boost/parameter/parameters.hpp>
#include <boost/static_assert.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#include <boost/lockfree/detail/uses_optional.hpp>
#include <boost/lockfree/lockfree_forward.hpp>

#include <cstdint> // std::uint16_t, used by the compile-time capacity check
#include <limits>  // std::numeric_limits
#include <tuple>
#include <type_traits>

namespace boost { namespace lockfree {
namespace detail {

typedef parameter::parameters< boost::parameter::optional< tag::allocator >, boost::parameter::optional< tag::capacity > >
    stack_signature;

} // namespace detail

/** The stack class provides a multi-writer/multi-reader stack, for which pushing and popping are lock-free,
 *  while construction and destruction have to be synchronized. It uses a freelist for memory management:
 *  freed nodes are pushed to the freelist and are not returned to the OS before the stack is destroyed.
 *
 *  \b Policies:
 *
 *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lock-free behavior.<br>
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and are
 *    addressed by array indexing. This limits the possible size of the stack to the number of elements that can be
 *    addressed by the index type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange
 *    instructions, this is the best way to achieve lock-freedom.
 *
 *  - \c boost::lockfree::capacity<>, optional <br>
 *    If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 *    This option implies \c fixed_sized<true>.
 *
 *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *  - T must have a copy constructor or a move constructor
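 *
 *  \b Example (illustrative usage sketch; a fixed capacity of 128 is assumed):
 *  \code
 *  boost::lockfree::stack< int, boost::lockfree::capacity< 128 > > s;
 *  s.push( 1 );
 *  int out;
 *  if ( s.pop( out ) ) {
 *      // out == 1
 *  }
 *  \endcode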
 * */
template < typename T, typename... Options >
#if !defined( BOOST_NO_CXX20_HDR_CONCEPTS )
    requires( std::is_copy_assignable_v< T > || std::is_move_assignable_v< T > )
#endif
class stack
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    BOOST_STATIC_ASSERT( std::is_copy_constructible< T >::value || std::is_move_constructible< T >::value );

    typedef typename detail::stack_signature::bind< Options... >::type bound_args;

    static const bool   has_capacity       = detail::extract_capacity< bound_args >::has_capacity;
    static const size_t capacity           = detail::extract_capacity< bound_args >::capacity;
    static const bool   fixed_sized        = detail::extract_fixed_sized< bound_args >::value;
    static const bool   node_based         = !( has_capacity || fixed_sized );
    static const bool   compile_time_sized = has_capacity;

    struct node
    {
        node( const T& val ) :
            v( val )
        {}

        node( T&& val ) :
            v( std::forward< T >( val ) )
        {}

        typedef typename detail::select_tagged_handle< node, node_based >::handle_type handle_t;

        handle_t next;
        T        v;
    };

    typedef typename detail::extract_allocator< bound_args, node >::type node_allocator;
    typedef
        typename detail::select_freelist< node, node_allocator, compile_time_sized, fixed_sized, capacity >::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;
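
    // Note: the tagged handle pairs a node handle with a modification counter;
    // the counter is what lets compare_exchange_weak detect ABA during pops.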

    // check compile-time capacity
    static constexpr bool capacity_is_valid = has_capacity ? capacity - 1 < std::numeric_limits< std::uint16_t >::max()
                                                           : true;
    BOOST_STATIC_ASSERT( capacity_is_valid );

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t    size_type;
    };
#endif

public:
    typedef T                                          value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if implementation is lock-free.
     *
     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
     *          On most platforms, the whole implementation is lock-free if this is true. Using C++11-style atomics,
     *          there is no way to provide a completely accurate implementation, because one would need to test
     *          every internal node, which is impossible if further nodes will be allocated from the operating system.
     *
     * */
    bool is_lock_free( void ) const
    {
        return tos.is_lock_free() && pool.is_lock_free();
    }

    /** Construct a fixed-sized stack
     *
     *  \pre Must specify a capacity<> argument
     * */
    explicit stack( void )
#if !defined( BOOST_NO_CXX20_HDR_CONCEPTS )
        requires( has_capacity )
#endif
        :
        pool( node_allocator(), capacity )
    {
        // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
        // this function and this function may be compiled even when it isn't being used.
        BOOST_ASSERT( has_capacity );
        initialize();
    }

    /** Construct a fixed-sized stack with a custom allocator
     *
     *  \pre Must specify a capacity<> argument
     * */
    template < typename U, typename Enabler = std::enable_if< has_capacity > >
    explicit stack( typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) :
        pool( alloc, capacity )
    {
        initialize();
    }

    /** Construct a fixed-sized stack with a custom allocator
     *
     *  \pre Must specify a capacity<> argument
     * */
    template < typename Enabler = std::enable_if< has_capacity > >
    explicit stack( allocator const& alloc ) :
        pool( alloc, capacity )
    {
        initialize();
    }

    /** Construct a variable-sized stack
     *
     *  Allocate n nodes initially for the freelist
     *
     *  \pre Must \b not specify a capacity<> argument
     * */
    template < typename Enabler = std::enable_if< !has_capacity > >
    explicit stack( size_type n ) :
        pool( node_allocator(), n )
    {
        initialize();
    }

    stack( const stack& )            = delete;
    stack& operator=( const stack& ) = delete;
    stack( stack&& )                 = delete;
    stack& operator=( stack&& )      = delete;


    /** Construct a variable-sized stack with a custom allocator
     *
     *  Allocate n nodes initially for the freelist
     *
     *  \pre Must \b not specify a capacity<> argument
     * */
    template < typename U, typename Enabler = std::enable_if< !has_capacity > >
    stack( size_type n, typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) :
        pool( alloc, n )
    {
        initialize();
    }

    /** Construct a variable-sized stack with a custom allocator
     *
     *  Allocate n nodes initially for the freelist
     *
     *  \pre Must \b not specify a capacity<> argument
     * */
    template < typename Enabler = std::enable_if< !has_capacity > >
    stack( size_type n, node_allocator const& alloc ) :
        pool( alloc, n )
    {
        initialize();
    }
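
    // Construction sketch (illustrative): without a capacity<> policy the stack
    // is run-time sized, so the initial freelist size is passed to the constructor:
    //
    //     boost::lockfree::stack< int > s( 128 ); // pre-allocates 128 freelist nodes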

    /** Allocate n nodes for freelist
     *
     *  \pre  only valid if no capacity<> argument given
     *  \note thread-safe, may block if memory allocator blocks
     *
     * */
    template < typename Enabler = std::enable_if< !has_capacity > >
    void reserve( size_type n )
    {
        pool.template reserve< true >( n );
    }

    /** Allocate n nodes for freelist
     *
     *  \pre  only valid if no capacity<> argument given
     *  \note not thread-safe, may block if memory allocator blocks
     *
     * */
    template < typename Enabler = std::enable_if< !has_capacity > >
    void reserve_unsafe( size_type n )
    {
        pool.template reserve< false >( n );
    }
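
    // Example (sketch, using the run-time sized stack `s` from the sketch above):
    //
    //     s.reserve( 64 );        // thread-safe variant
    //     s.reserve_unsafe( 64 ); // only valid while no other thread accesses the stack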

    /** Destroys the stack, freeing all nodes from the freelist.
     *
     *  \note not thread-safe
     *
     * */
    ~stack( void )
    {
        consume_all( []( const T& ) {} );
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    void initialize( void )
    {
        tos.store( tagged_node_handle( pool.null_handle(), 0 ) );
    }

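    // Links the pre-built chain [new_top_node .. end_node] on top of the stack.
    // The CAS loop below retries with the freshly observed top until the
    // exchange succeeds; the handle's tag guards against ABA.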
    void link_nodes_atomic( node* new_top_node, node* end_node )
    {
        tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed );
        for ( ;; ) {
            tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() );
            end_node->next = pool.get_handle( old_tos );

            if ( tos.compare_exchange_weak( old_tos, new_tos ) )
                break;
        }
    }

    void link_nodes_unsafe( node* new_top_node, node* end_node )
    {
        tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed );

        tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() );
        end_node->next = pool.get_handle( old_tos );

        tos.store( new_tos, detail::memory_order_relaxed );
    }

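    // Builds a singly linked chain from [begin, end) out of freshly constructed
    // freelist nodes. On allocation failure the chain built so far is kept and
    // `ret` marks the first element that was not consumed; on exception the
    // partial chain is destroyed and the exception is rethrown.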
    template < bool Threadsafe, bool Bounded, typename ConstIterator >
    std::tuple< node*, node* > prepare_node_list( ConstIterator begin, ConstIterator end, ConstIterator& ret )
    {
        ConstIterator it       = begin;
        node*         end_node = pool.template construct< Threadsafe, Bounded >( *it++ );
        if ( end_node == NULL ) {
            ret = begin;
            return std::make_tuple< node*, node* >( NULL, NULL );
        }

        node* new_top_node = end_node;
        end_node->next     = NULL;

        BOOST_TRY
        {
            /* link nodes */
            for ( ; it != end; ++it ) {
                node* newnode = pool.template construct< Threadsafe, Bounded >( *it );
                if ( newnode == NULL )
                    break;
                newnode->next = new_top_node;
                new_top_node  = newnode;
            }
        }
        BOOST_CATCH( ... )
        {
            for ( node* current_node = new_top_node; current_node != NULL; ) {
                node* next = current_node->next;
                pool.template destruct< Threadsafe >( current_node );
                current_node = next;
            }
            BOOST_RETHROW;
        }
        BOOST_CATCH_END

        ret = it;
        return std::make_tuple( new_top_node, end_node );
    }

    template < bool Bounded >
    bool do_push( T&& v )
    {
        node* newnode = pool.template construct< true, Bounded >( std::forward< T >( v ) );
        if ( newnode == 0 )
            return false;

        link_nodes_atomic( newnode, newnode );
        return true;
    }

    template < bool Bounded >
    bool do_push( T const& v )
    {
        node* newnode = pool.template construct< true, Bounded >( v );
        if ( newnode == 0 )
            return false;

        link_nodes_atomic( newnode, newnode );
        return true;
    }

    template < bool Bounded, typename ConstIterator >
    ConstIterator do_push( ConstIterator begin, ConstIterator end )
    {
        node*         new_top_node;
        node*         end_node;
        ConstIterator ret;

        std::tie( new_top_node, end_node ) = prepare_node_list< true, Bounded >( begin, end, ret );
        if ( new_top_node )
            link_nodes_atomic( new_top_node, end_node );

        return ret;
    }
#endif

public:
    /** Pushes object t to the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
     * be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool push( const T& v )
    {
        return do_push< false >( v );
    }

    /// \copydoc boost::lockfree::stack::push(const T& t)
    bool push( T&& v )
    {
        return do_push< false >( std::forward< T >( v ) );
    }

    /** Pushes object t to the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     * */
    bool bounded_push( const T& v )
    {
        return do_push< true >( v );
    }

    /// \copydoc boost::lockfree::stack::bounded_push(const T& t)
    bool bounded_push( T&& v )
    {
        return do_push< true >( std::forward< T >( v ) );
    }


    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
     * be allocated from the OS. This may not be lock-free.
     *
     * \throws if memory allocator throws
     */
    template < typename ConstIterator >
    ConstIterator push( ConstIterator begin, ConstIterator end )
    {
        return do_push< false, ConstIterator >( begin, end );
    }
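
    // Example (sketch, given a stack< int > `s`): pushing a range; the returned
    // iterator marks the first element that could not be pushed (== end on success).
    //
    //     std::vector< int > values = { 1, 2, 3 };
    //     auto first_unpushed = s.push( values.begin(), values.end() );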

    /** Pushes as many objects from the span as freelist nodes can be allocated.
     *
     * \return Number of elements pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will
     * be allocated from the OS. This may not be lock-free.
     *
     * \throws if memory allocator throws
     */
    template < std::size_t Extent >
    size_type push( boost::span< const T, Extent > t )
    {
        const T* end_pushed = push( t.begin(), t.end() );
        return std::distance( t.begin(), end_pushed );
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     * \throws if memory allocator throws
     */
    template < typename ConstIterator >
    ConstIterator bounded_push( ConstIterator begin, ConstIterator end )
    {
        return do_push< true, ConstIterator >( begin, end );
    }

    /** Pushes as many objects from the span as freelist nodes can be allocated.
     *
     * \return Number of elements pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     * \throws if memory allocator throws
     */
    template < std::size_t Extent >
    size_type bounded_push( boost::span< const T, Extent > t )
    {
        const T* end_pushed = bounded_push( t.begin(), t.end() );
        return std::distance( t.begin(), end_pushed );
    }


    /** Pushes object t to the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
     * will be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push( const T& v )
    {
        node* newnode = pool.template construct< false, false >( v );
        if ( newnode == 0 )
            return false;

        link_nodes_unsafe( newnode, newnode );
        return true;
    }

    /// \copydoc boost::lockfree::stack::unsynchronized_push(const T& t)
    bool unsynchronized_push( T&& v )
    {
        node* newnode = pool.template construct< false, false >( std::forward< T >( v ) );
        if ( newnode == 0 )
            return false;

        link_nodes_unsafe( newnode, newnode );
        return true;
    }

    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
     *
     * \return iterator to the first element that has not been pushed
     *
     * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
     * will be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template < typename ConstIterator >
    ConstIterator unsynchronized_push( ConstIterator begin, ConstIterator end )
    {
        node*         new_top_node;
        node*         end_node;
        ConstIterator ret;

        std::tie( new_top_node, end_node ) = prepare_node_list< false, false >( begin, end, ret );
        if ( new_top_node )
            link_nodes_unsafe( new_top_node, end_node );

        return ret;
    }

    /** Pushes as many objects from the span as freelist nodes can be allocated.
     *
     * \return Number of elements pushed
     *
     * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node
     * will be allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template < std::size_t Extent >
    size_type unsynchronized_push( boost::span< const T, Extent > t )
    {
        const T* end_pushed = unsynchronized_push( t.begin(), t.end() );
        return std::distance( t.begin(), end_pushed );
    }

    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    bool pop( T& ret )
    {
        return pop< T >( ret );
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template < typename U, typename Enabler = std::enable_if< std::is_convertible< T, U >::value > >
    bool pop( U& ret )
    {
        return consume_one( [ & ]( T&& arg ) {
            ret = std::forward< T >( arg );
        } );
    }
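
    // Example (sketch, given a stack< int > `s`): popping into a convertible type.
    //
    //     long value;
    //     bool ok = s.pop( value ); // int elements convert to long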

#if !defined( BOOST_NO_CXX17_HDR_OPTIONAL ) || defined( BOOST_DOXYGEN_INVOKED )
    /** Pops object from stack, returning a std::optional<>
     *
     * \returns `std::optional` with value if successful, `std::nullopt` if stack is empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    std::optional< T > pop( uses_optional_t )
    {
        T to_dequeue;
        if ( pop( to_dequeue ) )
            return to_dequeue;
        else
            return std::nullopt;
    }

    /** Pops object from stack, returning a std::optional<>
     *
     * \pre type T must be convertible to U
     * \returns `std::optional` with value if successful, `std::nullopt` if stack is empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template < typename U >
    std::optional< U > pop( uses_optional_t )
    {
        U to_dequeue;
        if ( pop( to_dequeue ) )
            return to_dequeue;
        else
            return std::nullopt;
    }
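
    // Example (sketch; requires C++17 <optional>). Assumes `uses_optional` is the
    // tag object provided via <boost/lockfree/detail/uses_optional.hpp>.
    //
    //     if ( std::optional< int > v = s.pop( boost::lockfree::uses_optional ) ) {
    //         // *v holds the popped element
    //     }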
#endif

    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop( T& ret )
    {
        return unsynchronized_pop< T >( ret );
    }

    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    template < typename U, typename Enabler = std::enable_if< std::is_convertible< T, U >::value > >
    bool unsynchronized_pop( U& ret )
    {
        tagged_node_handle old_tos         = tos.load( detail::memory_order_relaxed );
        node*              old_tos_pointer = pool.get_pointer( old_tos );

        if ( !old_tos_pointer )
            return false;

        node*              new_tos_ptr = pool.get_pointer( old_tos_pointer->next );
        tagged_node_handle new_tos( pool.get_handle( new_tos_ptr ), old_tos.get_next_tag() );

        tos.store( new_tos, detail::memory_order_relaxed );
        ret = std::move( old_tos_pointer->v );
        pool.template destruct< false >( old_tos );
        return true;
    }

    /** consumes one element via a functor
     *
     *  pops one element from the stack and applies the functor on this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template < typename Functor >
    bool consume_one( Functor&& f )
    {
        tagged_node_handle old_tos = tos.load( detail::memory_order_consume );

        for ( ;; ) {
            node* old_tos_pointer = pool.get_pointer( old_tos );
            if ( !old_tos_pointer )
                return false;

            tagged_node_handle new_tos( old_tos_pointer->next, old_tos.get_next_tag() );

            if ( tos.compare_exchange_weak( old_tos, new_tos ) ) {
                f( std::move( old_tos_pointer->v ) );
                pool.template destruct< true >( old_tos );
                return true;
            }
        }
    }

    /** consumes all elements via a functor
     *
     * sequentially pops all elements from the stack and applies the functor on each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template < typename Functor >
    size_t consume_all( Functor&& f )
    {
        size_t element_count = 0;
        while ( consume_one( f ) )
            element_count += 1;

        return element_count;
    }
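
    // Example (sketch, given a stack< int > `s`; std::printf from <cstdio>):
    //
    //     std::size_t n = s.consume_all( []( int v ) {
    //         std::printf( "%d\n", v );
    //     } );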

    /** consumes all elements via a functor
     *
     * atomically pops all elements from the stack and applies the functor on each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template < typename Functor >
    size_t consume_all_atomic( Functor&& f )
    {
        size_t             element_count = 0;
        tagged_node_handle old_tos       = tos.load( detail::memory_order_consume );

        for ( ;; ) {
            node* old_tos_pointer = pool.get_pointer( old_tos );
            if ( !old_tos_pointer )
                return 0;

            tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() );

            if ( tos.compare_exchange_weak( old_tos, new_tos ) )
                break;
        }
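
        // The whole chain has been detached in a single CAS above, so it can be
        // consumed below without further synchronization against concurrent pushes.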
        tagged_node_handle nodes_to_consume = old_tos;

        for ( ;; ) {
            node* node_pointer = pool.get_pointer( nodes_to_consume );
            f( std::move( node_pointer->v ) );
            element_count += 1;

            node* next_node = pool.get_pointer( node_pointer->next );

            if ( !next_node ) {
                pool.template destruct< true >( nodes_to_consume );
                break;
            }

            tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() );
            pool.template destruct< true >( nodes_to_consume );
            nodes_to_consume = next;
        }

        return element_count;
    }

    /** consumes all elements via a functor
     *
     * atomically pops all elements from the stack and applies the functor on each object in reverse order
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template < typename Functor >
    size_t consume_all_atomic_reversed( Functor&& f )
    {
        size_t             element_count = 0;
        tagged_node_handle old_tos       = tos.load( detail::memory_order_consume );

        for ( ;; ) {
            node* old_tos_pointer = pool.get_pointer( old_tos );
            if ( !old_tos_pointer )
                return 0;

            tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() );

            if ( tos.compare_exchange_weak( old_tos, new_tos ) )
                break;
        }

        tagged_node_handle nodes_to_consume = old_tos;

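        // First pass below reverses the detached chain in place; the second pass
        // consumes it, so the functor sees the elements in push order.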
        node*              last_node_pointer = NULL;
        tagged_node_handle nodes_in_reversed_order;
        for ( ;; ) {
            node* node_pointer = pool.get_pointer( nodes_to_consume );
            node* next_node    = pool.get_pointer( node_pointer->next );

            node_pointer->next = pool.get_handle( last_node_pointer );
            last_node_pointer  = node_pointer;

            if ( !next_node ) {
                nodes_in_reversed_order = nodes_to_consume;
                break;
            }

            tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() );
            nodes_to_consume = next;
        }

        for ( ;; ) {
            node* node_pointer = pool.get_pointer( nodes_in_reversed_order );
            f( std::move( node_pointer->v ) );
            element_count += 1;

            node* next_node = pool.get_pointer( node_pointer->next );

            if ( !next_node ) {
                pool.template destruct< true >( nodes_in_reversed_order );
                break;
            }

            tagged_node_handle next( pool.get_handle( next_node ), nodes_in_reversed_order.get_next_tag() );
            pool.template destruct< true >( nodes_in_reversed_order );
            nodes_in_reversed_order = next;
        }

        return element_count;
    }

    /**
     * \return true, if stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty( void ) const
    {
        return pool.get_pointer( tos.load() ) == NULL;
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    detail::atomic< tagged_node_handle > tos;

    static const int padding_size = detail::cacheline_bytes - sizeof( tagged_node_handle );
    char             padding[ padding_size ];

    pool_t pool;
#endif
};

}}     // namespace boost::lockfree

#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */