Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 09:39:18

0001 //  Copyright (C) 2008-2013 Tim Blechmann
0002 //
0003 //  Distributed under the Boost Software License, Version 1.0. (See
0004 //  accompanying file LICENSE_1_0.txt or copy at
0005 //  http://www.boost.org/LICENSE_1_0.txt)
0006 
0007 #ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
0008 #define BOOST_LOCKFREE_STACK_HPP_INCLUDED
0009 
0010 #include <boost/assert.hpp>
0011 #include <boost/checked_delete.hpp>
0012 #include <boost/core/allocator_access.hpp>
0013 #include <boost/core/no_exceptions_support.hpp>
0014 #include <boost/integer_traits.hpp>
0015 #include <boost/static_assert.hpp>
0016 #include <boost/tuple/tuple.hpp>
0017 #include <boost/type_traits/is_copy_constructible.hpp>
0018 
0019 #include <boost/lockfree/detail/atomic.hpp>
0020 #include <boost/lockfree/detail/copy_payload.hpp>
0021 #include <boost/lockfree/detail/freelist.hpp>
0022 #include <boost/lockfree/detail/parameter.hpp>
0023 #include <boost/lockfree/detail/tagged_ptr.hpp>
0024 
0025 #include <boost/lockfree/lockfree_forward.hpp>
0026 
0027 #ifdef BOOST_HAS_PRAGMA_ONCE
0028 #pragma once
0029 #endif
0030 
0031 namespace boost    {
0032 namespace lockfree {
0033 namespace detail   {
0034 
0035 typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
0036                               boost::parameter::optional<tag::capacity>
0037                              > stack_signature;
0038 
0039 }
0040 
0041 /** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free,
0042  *  construction/destruction has to be synchronized. It uses a freelist for memory management,
0043  *  freed nodes are pushed to the freelist and not returned to the OS before the stack is destroyed.
0044  *
0045  *  \b Policies:
0046  *
0047  *  - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
0048  *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
0049  *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
0050  *    by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
0051  *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
0052  *    to achieve lock-freedom.
0053  *
0054  *  - \c boost::lockfree::capacity<>, optional <br>
0055  *    If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
0056  *    It this option implies \c fixed_sized<true>
0057  *
0058  *  - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
0059  *    Specifies the allocator that is used for the internal freelist
0060  *
0061  *  \b Requirements:
0062  *  - T must have a copy constructor
0063  * */
0064 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
0065 template <typename T, class A0, class A1, class A2>
0066 #else
0067 template <typename T, typename ...Options>
0068 #endif
0069 class stack
0070 {
0071 private:
0072 #ifndef BOOST_DOXYGEN_INVOKED
0073     BOOST_STATIC_ASSERT(boost::is_copy_constructible<T>::value);
0074 
0075 #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
0076     typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
0077 #else
0078     typedef typename detail::stack_signature::bind<Options...>::type bound_args;
0079 #endif
0080 
0081     static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
0082     static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
0083     static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
0084     static const bool node_based = !(has_capacity || fixed_sized);
0085     static const bool compile_time_sized = has_capacity;
0086 
0087     struct node
0088     {
0089         node(T const & val):
0090             v(val)
0091         {}
0092 
0093         typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
0094         handle_t next;
0095         const T v;
0096     };
0097 
0098     typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
0099     typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
0100     typedef typename pool_t::tagged_node_handle tagged_node_handle;
0101 
0102     // check compile-time capacity
0103     BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
0104                                    mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
0105                                    mpl::true_
0106                                   >::type::value));
0107 
0108     struct implementation_defined
0109     {
0110         typedef node_allocator allocator;
0111         typedef std::size_t size_type;
0112     };
0113 
0114 #endif
0115 
0116     BOOST_DELETED_FUNCTION(stack(stack const&))
0117     BOOST_DELETED_FUNCTION(stack& operator= (stack const&))
0118 
0119 public:
0120     typedef T value_type;
0121     typedef typename implementation_defined::allocator allocator;
0122     typedef typename implementation_defined::size_type size_type;
0123 
0124     /**
0125      * \return true, if implementation is lock-free.
0126      *
0127      * \warning It only checks, if the top stack node and the freelist can be modified in a lock-free manner.
0128      *          On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics,
0129      *          there is no possibility to provide a completely accurate implementation, because one would need to test
0130      *          every internal node, which is impossible if further nodes will be allocated from the operating system.
0131      *
0132      * */
0133     bool is_lock_free (void) const
0134     {
0135         return tos.is_lock_free() && pool.is_lock_free();
0136     }
0137 
0138     /** Construct a fixed-sized stack
0139      *
0140      *  \pre Must specify a capacity<> argument
0141      * */
0142     stack(void):
0143         pool(node_allocator(), capacity)
0144     {
0145         // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
0146         // this function and this function may be compiled even when it isn't being used.
0147         BOOST_ASSERT(has_capacity);
0148         initialize();
0149     }
0150 
0151     /** Construct a fixed-sized stack with a custom allocator
0152      *
0153      *  \pre Must specify a capacity<> argument
0154      * */
0155     template <typename U>
0156     explicit stack(typename boost::allocator_rebind<node_allocator, U>::type const & alloc):
0157         pool(alloc, capacity)
0158     {
0159         BOOST_STATIC_ASSERT(has_capacity);
0160         initialize();
0161     }
0162 
0163     /** Construct a fixed-sized stack with a custom allocator
0164      *
0165      *  \pre Must specify a capacity<> argument
0166      * */
0167     explicit stack(allocator const & alloc):
0168         pool(alloc, capacity)
0169     {
0170         // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
0171         // this function and this function may be compiled even when it isn't being used.
0172         BOOST_ASSERT(has_capacity);
0173         initialize();
0174     }
0175 
0176     /** Construct a variable-sized stack
0177      *
0178      *  Allocate n nodes initially for the freelist
0179      *
0180      *  \pre Must \b not specify a capacity<> argument
0181      * */
0182     explicit stack(size_type n):
0183         pool(node_allocator(), n)
0184     {
0185         // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
0186         // this function and this function may be compiled even when it isn't being used.
0187         BOOST_ASSERT(!has_capacity);
0188         initialize();
0189     }
0190 
0191     /** Construct a variable-sized stack with a custom allocator
0192      *
0193      *  Allocate n nodes initially for the freelist
0194      *
0195      *  \pre Must \b not specify a capacity<> argument
0196      * */
0197     template <typename U>
0198     stack(size_type n, typename boost::allocator_rebind<node_allocator, U>::type const & alloc):
0199         pool(alloc, n)
0200     {
0201         BOOST_STATIC_ASSERT(!has_capacity);
0202         initialize();
0203     }
0204 
0205     /** Allocate n nodes for freelist
0206      *
0207      *  \pre  only valid if no capacity<> argument given
0208      *  \note thread-safe, may block if memory allocator blocks
0209      *
0210      * */
0211     void reserve(size_type n)
0212     {
0213         // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
0214         // this function and this function may be compiled even when it isn't being used.
0215         BOOST_ASSERT(!has_capacity);
0216         pool.template reserve<true>(n);
0217     }
0218 
0219     /** Allocate n nodes for freelist
0220      *
0221      *  \pre  only valid if no capacity<> argument given
0222      *  \note not thread-safe, may block if memory allocator blocks
0223      *
0224      * */
0225     void reserve_unsafe(size_type n)
0226     {
0227         // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling
0228         // this function and this function may be compiled even when it isn't being used.
0229         BOOST_ASSERT(!has_capacity);
0230         pool.template reserve<false>(n);
0231     }
0232 
0233     /** Destroys stack, free all nodes from freelist.
0234      *
0235      *  \note not thread-safe
0236      *
0237      * */
0238     ~stack(void)
0239     {
0240         detail::consume_noop consume_functor;
0241         (void)consume_all(consume_functor);
0242     }
0243 
0244 private:
0245 #ifndef BOOST_DOXYGEN_INVOKED
0246     void initialize(void)
0247     {
0248         tos.store(tagged_node_handle(pool.null_handle(), 0));
0249     }
0250 
0251     void link_nodes_atomic(node * new_top_node, node * end_node)
0252     {
0253         tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
0254         for (;;) {
0255             tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
0256             end_node->next = pool.get_handle(old_tos);
0257 
0258             if (tos.compare_exchange_weak(old_tos, new_tos))
0259                 break;
0260         }
0261     }
0262 
0263     void link_nodes_unsafe(node * new_top_node, node * end_node)
0264     {
0265         tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
0266 
0267         tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
0268         end_node->next = pool.get_handle(old_tos);
0269 
0270         tos.store(new_tos, memory_order_relaxed);
0271     }
0272 
0273     template <bool Threadsafe, bool Bounded, typename ConstIterator>
0274     tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
0275     {
0276         ConstIterator it = begin;
0277         node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
0278         if (end_node == NULL) {
0279             ret = begin;
0280             return make_tuple<node*, node*>(NULL, NULL);
0281         }
0282 
0283         node * new_top_node = end_node;
0284         end_node->next = NULL;
0285 
0286         BOOST_TRY {
0287             /* link nodes */
0288             for (; it != end; ++it) {
0289                 node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
0290                 if (newnode == NULL)
0291                     break;
0292                 newnode->next = new_top_node;
0293                 new_top_node = newnode;
0294             }
0295         } BOOST_CATCH (...) {
0296             for (node * current_node = new_top_node; current_node != NULL;) {
0297                 node * next = current_node->next;
0298                 pool.template destruct<Threadsafe>(current_node);
0299                 current_node = next;
0300             }
0301             BOOST_RETHROW;
0302         }
0303         BOOST_CATCH_END
0304 
0305         ret = it;
0306         return make_tuple(new_top_node, end_node);
0307     }
0308 #endif
0309 
0310 public:
0311     /** Pushes object t to the stack.
0312      *
0313      * \post object will be pushed to the stack, if internal node can be allocated
0314      * \returns true, if the push operation is successful.
0315      *
0316      * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
0317      *                    from the OS. This may not be lock-free.
0318      * \throws if memory allocator throws
0319      * */
0320     bool push(T const & v)
0321     {
0322         return do_push<false>(v);
0323     }
0324 
0325     /** Pushes object t to the stack.
0326      *
0327      * \post object will be pushed to the stack, if internal node can be allocated
0328      * \returns true, if the push operation is successful.
0329      *
0330      * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
0331      * */
0332     bool bounded_push(T const & v)
0333     {
0334         return do_push<true>(v);
0335     }
0336 
0337 #ifndef BOOST_DOXYGEN_INVOKED
0338 private:
0339     template <bool Bounded>
0340     bool do_push(T const & v)
0341     {
0342         node * newnode = pool.template construct<true, Bounded>(v);
0343         if (newnode == 0)
0344             return false;
0345 
0346         link_nodes_atomic(newnode, newnode);
0347         return true;
0348     }
0349 
0350     template <bool Bounded, typename ConstIterator>
0351     ConstIterator do_push(ConstIterator begin, ConstIterator end)
0352     {
0353         node * new_top_node;
0354         node * end_node;
0355         ConstIterator ret;
0356 
0357         tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
0358         if (new_top_node)
0359             link_nodes_atomic(new_top_node, end_node);
0360 
0361         return ret;
0362     }
0363 
0364 public:
0365 #endif
0366 
0367     /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
0368      *
0369      * \return iterator to the first element, which has not been pushed
0370      *
0371      * \note Operation is applied atomically
0372      * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
0373      *                    from the OS. This may not be lock-free.
0374      * \throws if memory allocator throws
0375      */
0376     template <typename ConstIterator>
0377     ConstIterator push(ConstIterator begin, ConstIterator end)
0378     {
0379         return do_push<false, ConstIterator>(begin, end);
0380     }
0381 
0382     /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
0383      *
0384      * \return iterator to the first element, which has not been pushed
0385      *
0386      * \note Operation is applied atomically
0387      * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
0388      * \throws if memory allocator throws
0389      */
0390     template <typename ConstIterator>
0391     ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
0392     {
0393         return do_push<true, ConstIterator>(begin, end);
0394     }
0395 
0396 
0397     /** Pushes object t to the stack.
0398      *
0399      * \post object will be pushed to the stack, if internal node can be allocated
0400      * \returns true, if the push operation is successful.
0401      *
0402      * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
0403      *       from the OS. This may not be lock-free.
0404      * \throws if memory allocator throws
0405      * */
0406     bool unsynchronized_push(T const & v)
0407     {
0408         node * newnode = pool.template construct<false, false>(v);
0409         if (newnode == 0)
0410             return false;
0411 
0412         link_nodes_unsafe(newnode, newnode);
0413         return true;
0414     }
0415 
0416     /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
0417      *
0418      * \return iterator to the first element, which has not been pushed
0419      *
0420      * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
0421      *       from the OS. This may not be lock-free.
0422      * \throws if memory allocator throws
0423      */
0424     template <typename ConstIterator>
0425     ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
0426     {
0427         node * new_top_node;
0428         node * end_node;
0429         ConstIterator ret;
0430 
0431         tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
0432         if (new_top_node)
0433             link_nodes_unsafe(new_top_node, end_node);
0434 
0435         return ret;
0436     }
0437 
0438 
0439     /** Pops object from stack.
0440      *
0441      * \post if pop operation is successful, object will be copied to ret.
0442      * \returns true, if the pop operation is successful, false if stack was empty.
0443      *
0444      * \note Thread-safe and non-blocking
0445      *
0446      * */
0447     bool pop(T & ret)
0448     {
0449         return pop<T>(ret);
0450     }
0451 
0452     /** Pops object from stack.
0453      *
0454      * \pre type T must be convertible to U
0455      * \post if pop operation is successful, object will be copied to ret.
0456      * \returns true, if the pop operation is successful, false if stack was empty.
0457      *
0458      * \note Thread-safe and non-blocking
0459      *
0460      * */
0461     template <typename U>
0462     bool pop(U & ret)
0463     {
0464         BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
0465         detail::consume_via_copy<U> consumer(ret);
0466 
0467         return consume_one(consumer);
0468     }
0469 
0470 
0471     /** Pops object from stack.
0472      *
0473      * \post if pop operation is successful, object will be copied to ret.
0474      * \returns true, if the pop operation is successful, false if stack was empty.
0475      *
0476      * \note Not thread-safe, but non-blocking
0477      *
0478      * */
0479     bool unsynchronized_pop(T & ret)
0480     {
0481         return unsynchronized_pop<T>(ret);
0482     }
0483 
0484     /** Pops object from stack.
0485      *
0486      * \pre type T must be convertible to U
0487      * \post if pop operation is successful, object will be copied to ret.
0488      * \returns true, if the pop operation is successful, false if stack was empty.
0489      *
0490      * \note Not thread-safe, but non-blocking
0491      *
0492      * */
0493     template <typename U>
0494     bool unsynchronized_pop(U & ret)
0495     {
0496         BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
0497         tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
0498         node * old_tos_pointer = pool.get_pointer(old_tos);
0499 
0500         if (!pool.get_pointer(old_tos))
0501             return false;
0502 
0503         node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
0504         tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());
0505 
0506         tos.store(new_tos, memory_order_relaxed);
0507         detail::copy_payload(old_tos_pointer->v, ret);
0508         pool.template destruct<false>(old_tos);
0509         return true;
0510     }
0511 
0512     /** consumes one element via a functor
0513      *
0514      *  pops one element from the stack and applies the functor on this object
0515      *
0516      * \returns true, if one element was consumed
0517      *
0518      * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
0519      * */
0520     template <typename Functor>
0521     bool consume_one(Functor & f)
0522     {
0523         tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
0524 
0525         for (;;) {
0526             node * old_tos_pointer = pool.get_pointer(old_tos);
0527             if (!old_tos_pointer)
0528                 return false;
0529 
0530             tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());
0531 
0532             if (tos.compare_exchange_weak(old_tos, new_tos)) {
0533                 f(old_tos_pointer->v);
0534                 pool.template destruct<true>(old_tos);
0535                 return true;
0536             }
0537         }
0538     }
0539 
0540     /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
0541     template <typename Functor>
0542     bool consume_one(Functor const & f)
0543     {
0544         tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
0545 
0546         for (;;) {
0547             node * old_tos_pointer = pool.get_pointer(old_tos);
0548             if (!old_tos_pointer)
0549                 return false;
0550 
0551             tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());
0552 
0553             if (tos.compare_exchange_weak(old_tos, new_tos)) {
0554                 f(old_tos_pointer->v);
0555                 pool.template destruct<true>(old_tos);
0556                 return true;
0557             }
0558         }
0559     }
0560 
0561     /** consumes all elements via a functor
0562      *
0563      * sequentially pops all elements from the stack and applies the functor on each object
0564      *
0565      * \returns number of elements that are consumed
0566      *
0567      * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
0568      * */
0569     template <typename Functor>
0570     size_t consume_all(Functor & f)
0571     {
0572         size_t element_count = 0;
0573         while (consume_one(f))
0574             element_count += 1;
0575 
0576         return element_count;
0577     }
0578 
0579     /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
0580     template <typename Functor>
0581     size_t consume_all(Functor const & f)
0582     {
0583         size_t element_count = 0;
0584         while (consume_one(f))
0585             element_count += 1;
0586 
0587         return element_count;
0588     }
0589 
0590     /** consumes all elements via a functor
0591      *
0592      * atomically pops all elements from the stack and applies the functor on each object
0593      *
0594      * \returns number of elements that are consumed
0595      *
0596      * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
0597      * */
0598     template <typename Functor>
0599     size_t consume_all_atomic(Functor & f)
0600     {
0601         size_t element_count = 0;
0602         tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
0603 
0604         for (;;) {
0605             node * old_tos_pointer = pool.get_pointer(old_tos);
0606             if (!old_tos_pointer)
0607                 return 0;
0608 
0609             tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());
0610 
0611             if (tos.compare_exchange_weak(old_tos, new_tos))
0612                 break;
0613         }
0614 
0615         tagged_node_handle nodes_to_consume = old_tos;
0616 
0617         for(;;) {
0618             node * node_pointer = pool.get_pointer(nodes_to_consume);
0619             f(node_pointer->v);
0620             element_count += 1;
0621 
0622             node * next_node = pool.get_pointer(node_pointer->next);
0623 
0624             if (!next_node) {
0625                 pool.template destruct<true>(nodes_to_consume);
0626                 break;
0627             }
0628 
0629             tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
0630             pool.template destruct<true>(nodes_to_consume);
0631             nodes_to_consume = next;
0632         }
0633 
0634         return element_count;
0635     }
0636 
0637     /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs)
0638     template <typename Functor>
0639     size_t consume_all_atomic(Functor const & f)
0640     {
0641         size_t element_count = 0;
0642         tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
0643 
0644         for (;;) {
0645             node * old_tos_pointer = pool.get_pointer(old_tos);
0646             if (!old_tos_pointer)
0647                 return 0;
0648 
0649             tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());
0650 
0651             if (tos.compare_exchange_weak(old_tos, new_tos))
0652                 break;
0653         }
0654 
0655         tagged_node_handle nodes_to_consume = old_tos;
0656 
0657         for(;;) {
0658             node * node_pointer = pool.get_pointer(nodes_to_consume);
0659             f(node_pointer->v);
0660             element_count += 1;
0661 
0662             node * next_node = pool.get_pointer(node_pointer->next);
0663 
0664             if (!next_node) {
0665                 pool.template destruct<true>(nodes_to_consume);
0666                 break;
0667             }
0668 
0669             tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
0670             pool.template destruct<true>(nodes_to_consume);
0671             nodes_to_consume = next;
0672         }
0673 
0674         return element_count;
0675     }
0676 
0677     /** consumes all elements via a functor
0678      *
0679      * atomically pops all elements from the stack and applies the functor on each object in reversed order
0680      *
0681      * \returns number of elements that are consumed
0682      *
0683      * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
0684      * */
0685     template <typename Functor>
0686     size_t consume_all_atomic_reversed(Functor & f)
0687     {
0688         size_t element_count = 0;
0689         tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
0690 
0691         for (;;) {
0692             node * old_tos_pointer = pool.get_pointer(old_tos);
0693             if (!old_tos_pointer)
0694                 return 0;
0695 
0696             tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());
0697 
0698             if (tos.compare_exchange_weak(old_tos, new_tos))
0699                 break;
0700         }
0701 
0702         tagged_node_handle nodes_to_consume = old_tos;
0703 
0704         node * last_node_pointer = NULL;
0705         tagged_node_handle nodes_in_reversed_order;
0706         for(;;) {
0707             node * node_pointer = pool.get_pointer(nodes_to_consume);
0708             node * next_node    = pool.get_pointer(node_pointer->next);
0709 
0710             node_pointer->next  = pool.get_handle(last_node_pointer);
0711             last_node_pointer   = node_pointer;
0712 
0713             if (!next_node) {
0714                 nodes_in_reversed_order = nodes_to_consume;
0715                 break;
0716             }
0717 
0718             tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
0719             nodes_to_consume = next;
0720         }
0721 
0722         for(;;) {
0723             node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
0724             f(node_pointer->v);
0725             element_count += 1;
0726 
0727             node * next_node = pool.get_pointer(node_pointer->next);
0728 
0729             if (!next_node) {
0730                 pool.template destruct<true>(nodes_in_reversed_order);
0731                 break;
0732             }
0733 
0734             tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
0735             pool.template destruct<true>(nodes_in_reversed_order);
0736             nodes_in_reversed_order = next;
0737         }
0738 
0739         return element_count;
0740     }
0741 
0742     /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs)
0743     template <typename Functor>
0744     size_t consume_all_atomic_reversed(Functor const & f)
0745     {
0746         size_t element_count = 0;
0747         tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
0748 
0749         for (;;) {
0750             node * old_tos_pointer = pool.get_pointer(old_tos);
0751             if (!old_tos_pointer)
0752                 return 0;
0753 
0754             tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag());
0755 
0756             if (tos.compare_exchange_weak(old_tos, new_tos))
0757                 break;
0758         }
0759 
0760         tagged_node_handle nodes_to_consume = old_tos;
0761 
0762         node * last_node_pointer = NULL;
0763         tagged_node_handle nodes_in_reversed_order;
0764         for(;;) {
0765             node * node_pointer = pool.get_pointer(nodes_to_consume);
0766             node * next_node    = pool.get_pointer(node_pointer->next);
0767 
0768             node_pointer->next  = pool.get_handle(last_node_pointer);
0769             last_node_pointer   = node_pointer;
0770 
0771             if (!next_node) {
0772                 nodes_in_reversed_order = nodes_to_consume;
0773                 break;
0774             }
0775 
0776             tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag());
0777             nodes_to_consume = next;
0778         }
0779 
0780         for(;;) {
0781             node * node_pointer = pool.get_pointer(nodes_in_reversed_order);
0782             f(node_pointer->v);
0783             element_count += 1;
0784 
0785             node * next_node = pool.get_pointer(node_pointer->next);
0786 
0787             if (!next_node) {
0788                 pool.template destruct<true>(nodes_in_reversed_order);
0789                 break;
0790             }
0791 
0792             tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag());
0793             pool.template destruct<true>(nodes_in_reversed_order);
0794             nodes_in_reversed_order = next;
0795         }
0796 
0797         return element_count;
0798     }
0799     /**
0800      * \return true, if stack is empty.
0801      *
0802      * \note It only guarantees that at some point during the execution of the function the stack has been empty.
0803      *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
0804      * */
0805     bool empty(void) const
0806     {
0807         return pool.get_pointer(tos.load()) == NULL;
0808     }
0809 
0810 private:
0811 #ifndef BOOST_DOXYGEN_INVOKED
0812     detail::atomic<tagged_node_handle> tos;
0813 
0814     static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
0815     char padding[padding_size];
0816 
0817     pool_t pool;
0818 #endif
0819 };
0820 
0821 } /* namespace lockfree */
0822 } /* namespace boost */
0823 
0824 #endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */