//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
// interprocess/detail
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container
#include <boost/container/detail/multiallocation_chain.hpp>
// container/detail
#include <boost/container/detail/placement_new.hpp>
// move/detail
#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
#include <boost/move/detail/force_ptr.hpp> //force_ptr
// intrusive
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
// other boost
#include <boost/assert.hpp>
// std
#include <climits>
#include <cstring>

//#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
//to maintain an ABI compatible with the original version.
//The ABI had to be updated to fix compatibility issues when
//sharing shared memory between 32 and 64 bit processes.

//!\file
//!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
//!objects in shared memory. This class is intended as a base class for single segment
//!and multi-segment implementations.

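//!A usage sketch (illustrative; not part of the original header). Most code
//!exercises rbtree_best_fit indirectly, since it is the default memory
//!algorithm of managed_shared_memory:
//!\code
//! #include <boost/interprocess/managed_shared_memory.hpp>
//!
//! using namespace boost::interprocess;
//!
//! int main()
//! {
//!    //Allocations in this segment are served by rbtree_best_fit
//!    managed_shared_memory segment(create_only, "MySharedMemory", 65536);
//!    void *p = segment.allocate(1024); //best-fit search in the free-block tree
//!    segment.deallocate(p);            //coalesces with free neighbors
//!    shared_memory_object::remove("MySharedMemory");
//!    return 0;
//! }
//!\endcode
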
namespace boost {
namespace interprocess {

//!This class implements an algorithm that stores the free nodes in a red-black tree
//!to have logarithmic search/insert times.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type                   block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type                         char_ptr;

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer>  multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type     size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:

   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type           TreeHook;

   struct SizeHolder
   {
      static const size_type size_mask = size_type(-1) >> 2;
      //!Previous block's memory size (including block_ctrl
      //!header) in Alignment units. This field (UsableByPreviousChunk bytes)
      //!is OVERWRITTEN by the previous block if allocated (m_prev_allocated)
      size_type m_prev_size;
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_size      :  sizeof(size_type)*CHAR_BIT - 2;
      size_type m_prev_allocated :  1;
      size_type m_allocated :  1;
   };
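
   //A worked example of the SizeHolder layout (illustrative): with a 64-bit
   //size_type, m_size occupies sizeof(size_type)*CHAR_BIT - 2 == 62 bits and
   //the two remaining bits hold the m_prev_allocated/m_allocated flags, so
   //size_mask == size_type(-1) >> 2 == 2^62 - 1. Since sizes are stored in
   //Alignment units, even the masked field can address the whole segment.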

   //!Block control structure
   struct block_ctrl
      :  public SizeHolder
      //This tree hook is overwritten when this block is used
      , public TreeHook
   {
      block_ctrl()
      {
         this->SizeHolder::m_size = 0;
         this->SizeHolder::m_allocated = 0;
         this->SizeHolder::m_prev_allocated = 0;
      }

      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.SizeHolder::m_size < b.SizeHolder::m_size;  }

      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.SizeHolder::m_size == b.SizeHolder::m_size;  }
   };

   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };

   //!Shared mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type                       mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type                Imultiset;

   typedef typename Imultiset::iterator                           imultiset_iterator;
   typedef typename Imultiset::const_iterator                     imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null mutex_type
   struct header_t : public mutex_type
   {
      Imultiset            m_imultiset;

      //!The extra size required by the segment
      size_type            m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type            m_allocated;
      //!The size of the memory segment
      size_type            m_size;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment, and
   //!"extra_hdr_bytes" indicates the extra bytes, beginning at the sizeof(rbtree_best_fit)
   //!offset, that the allocator should not use at all.
   rbtree_best_fit           (size_type size, size_type extra_hdr_bytes);

   //!Destructor.
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates nbytes bytes; returns 0 if there is no more memory.
   //!Returned memory is aligned to Alignment bytes.
   void* allocate             (size_type nbytes);

   //!Deallocates previously allocated bytes
   void   deallocate(void* addr);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use

   //!Multiple element allocation, same size
   //!Experimental. Don't use
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   //!Experimental. Don't use
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   //!Experimental. Don't use
   void deallocate_many(multiallocation_chain &chain);

   template<class T>
   T* allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
      size_type& prefer_in_recvd_out_size, T*& reuse);

   void* raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
      size_type& prefer_in_recvd_out_size,
      void*& reuse_ptr, size_type sizeof_object = 1);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Returns the size of the memory segment
   size_type get_size()  const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory()  const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases the managed memory by
   //!extra_size bytes
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true on success
   bool check_sanity();

   //!Returns the size of the previously allocated buffer pointed to by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes; returns 0 if there is no more memory.
   //!Alignment must be a power of 2
   void* allocate_aligned     (size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command,   size_type limit_size,
                        size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);


   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Gets a pointer to the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Gets a pointer to the "first" block from the end block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Gets a pointer to the previous block (the previous block must be free)
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Gets a pointer to the next block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Checks if this block is allocated
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr); }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the user
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::dtl::alignment_of
                  < ::boost::container::dtl::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes  = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits  = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes   = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits   = EndCtrlBlockBytes/Alignment;
   static const size_type UsableByPreviousChunk   = sizeof(size_type);

   //Make sure the maximum alignment is a power of two
   BOOST_INTERPROCESS_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   public:
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
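   //Worked example (illustrative figures): on a typical LP64 build with the
   //default Alignment of 16, sizeof(SizeHolder) == 16, so AllocatedCtrlBytes
   //== 16 and UsableByPreviousChunk == sizeof(size_type) == 8, giving a
   //PayloadPerAllocation of 8 bytes of real overhead per allocation. The
   //discount works because an allocated block may also use the m_prev_size
   //field of the block that follows it (see SizeHolder above).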
};

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
       rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
{
   size_type uint_this      = (std::size_t)this_ptr;
   size_type main_hdr_end   = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
   size_type block1_off = aligned_main_hdr_end -  uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, size_type segment_size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block and the "end" node
   block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
   first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
   BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);

   //The "end" node is just a node of size 0 with the "end" bit set
   SizeHolder *end_block =
      ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;

   //This will overwrite the prev part of the "end" node
   priv_mark_as_free_block (first_big_block);
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   first_big_block->m_prev_size = end_block->m_size =
      size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment & block_ctrl::size_mask;
   #else
   first_big_block->m_prev_size = end_block->m_size =
      size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
   #endif
   end_block->m_allocated = 1;
   first_big_block->m_prev_allocated = 1;

   BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
   BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
   BOOST_ASSERT(priv_first_block() == first_big_block);
   BOOST_ASSERT(priv_end_block() == end_block);

   //Some checks to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the sizes of the header are placed before the rbtree
   BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
          < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
   //Insert it in the intrusive containers
   m_header.m_imultiset.insert(*first_big_block);
}
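
//A sketch of the segment layout that priv_add_segment establishes
//(illustrative):
//
//   [rbtree_best_fit header][extra_hdr_bytes][padding to Alignment]
//   [first_big_block: SizeHolder + TreeHook + free space..........]
//   [end block: SizeHolder only, m_size = distance (in Alignment
//    units) to the first block]
//
//The end block is permanently marked as allocated so that the last real
//block never tries to coalesce forward past the end of the segment.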

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
       rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block()
{
   const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
       rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_end_block()
{
   const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
   block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated       = 0;
   m_header.m_size            = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off  = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                    reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least BlockCtrlBytes bytes to create a new block
   if((m_header.m_size - old_border_offset) < BlockCtrlBytes){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //the last and first blocks are special:
   //new_end_block->m_size & first_block->m_prev_size store the absolute
   //distance between them
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size      = size_type(reinterpret_cast<char*>(first_block) -
                                          reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   #else
   new_end_block->m_size      = size_type(reinterpret_cast<char*>(new_end_block) -
                                          reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                                 reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //block_ctrl *old_end_block = priv_end_block(first_block);
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and last block
   if(priv_next_block(first_block) == old_end_block){
      //If so, check if we can allocate memory
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If we can't, return: there is nothing to shrink
      if(!unique_buffer)
         return;
      //If we can, mark the position just after the new allocation as the new end
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If memory is allocated, check if the last block is allocated
      if(priv_is_prev_allocated(old_end_block))
         return;
      //If not, the last block is the free block preceding the end block
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase the last block from the free tree, since we are going to overwrite it
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                       reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write new end block attributes
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #else
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size()  const
{  return m_header.m_size;  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory()  const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (size_type extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           BlockCtrlUnits + EndCtrlBlockUnits)*Alignment;
}
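
//In other words (a summary, not additional logic): the minimum segment is the
//Alignment-rounded header plus extra_hdr_bytes, one minimal free block
//(BlockCtrlUnits) and the end marker (EndCtrlBlockUnits), all counted in
//Alignment units and scaled back to bytes. The constructor asserts
//get_min_size(extra_hdr_bytes) <= segment_size before carving up the segment.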

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off  =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
      (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end() &&
      m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   size_type free_memory = 0;

   //Iterate through all blocks obtaining their size
   for(; ib != ie; ++ib){
      free_memory += (size_type)ib->m_size*Alignment;
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   size_type block1_off  =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Check free bytes are less than size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(size_type nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type ignore_recvd = nbytes;
   void *ignore_reuse = 0;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
                        size_type &prefer_in_recvd_out_size, T *&reuse)
{
   void* raw_reuse = reuse;
   void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   reuse = static_cast<T*>(raw_reuse);
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
   return static_cast<T*>(ret);
}
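
//Illustrative sketch (assumes direct access to the algorithm object, which
//user code normally reaches through a segment manager): try to grow an
//existing buffer in place, falling back to a fresh allocation.
//\code
// typedef rbtree_best_fit<mutex_family, offset_ptr<void> > algo_t;
// //algo: an algorithm instance constructed at the start of a segment;
// //old_buffer: a buffer of 100 chars previously allocated from it
// algo_t::size_type recvd = 200;   //in: preferred size; out: received size
// char *reuse = old_buffer;
// char *p = algo.allocation_command<char>
//    ( boost::interprocess::expand_fwd | boost::interprocess::allocate_new
//    , 100 /*limit_size*/, recvd, reuse);
// //if reuse != 0, old_buffer was expanded in place to recvd chars;
// //otherwise p points to a newly allocated buffer (contents not copied)
//\endcode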

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command  (boost::interprocess::allocation_type command,   size_type limit_objects,
                        size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_objects;
   if(!sizeof_object)
      return reuse_ptr = 0, static_cast<void*>(0);
   if(command & boost::interprocess::try_shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      const bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
      prefer_in_recvd_out_objects /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
   }
}


template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command (boost::interprocess::allocation_type command,   size_type limit_size,
                       size_type &prefer_in_recvd_out_size,
                       void *&reuse_ptr, size_type sizeof_object)
{
   void* ret;
   size_type const preferred_size = prefer_in_recvd_out_size;
   size_type const max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type p_size = preferred_size*sizeof_object;
   size_type r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //We need no synchronization since this block's size is not going
   //to be modified by anyone else
   //Obtain the real size of the block
   return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all blocks obtaining their size
   while(ib != ie){
      //Just clear the memory part reserved for the user
      volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
      size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
      while(s--){
         *ptr++ = 0;
      }

      //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
      //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
      //           , 0
      //           , ib->m_size*Alignment - BlockCtrlBytes);
      ++ib;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Free old previous buffer
            new_block->m_size =
               (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
                                           reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is a multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, chain);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
                ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units to request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);

   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      if(it != m_header.m_imultiset.begin() &&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }


   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (move_detail::force_ptr<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
      priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);   }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(size_type userbytes)
{
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}
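
//Worked example (illustrative, assuming Alignment == 16 and a 64-bit
//size_type): for userbytes == 100, UsableByPreviousChunk == 8, so
//100 - 8 == 92 is rounded up to 96 bytes == 6 units, plus
//AllocatedCtrlUnits == 1, giving 7 units (112 bytes) for the whole block.
//Tiny requests are padded up to BlockCtrlUnits because every block must be
//able to hold the free-block metadata (TreeHook) once it is deallocated.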

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;

   //Now translate it to Alignment units
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);

   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;

   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;

   if(merged_user_units < min_user_units){
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //These are total units of the merged block (supposing the next block can be split)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;

   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >=  BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks, the first one will be merged and
      //the second's size will be the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;

      //Check if we need to update the old next block in the free blocks tree.
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old one if needed and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      m_header.m_imultiset.erase(old_next_block_it);

      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
      rem_block->m_size = rem_units & block_ctrl::size_mask;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      m_header.m_imultiset.insert(*rem_block);

      //Write the new length
      block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the new length
      block->m_size = merged_units & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   BOOST_ASSERT(!ptr->m_prev_allocated);
   return move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
}


template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
{
   //The first block's logic is different from the rest of the blocks: it stores
   //in m_prev_size the absolute distance to the end block
   BOOST_ASSERT(first_segment_block->m_prev_allocated);
   block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
   (void)end_block;
   BOOST_ASSERT(end_block->m_allocated == 1);
   BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
   BOOST_ASSERT(end_block > first_segment_block);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
{
   //The first block's logic is different from the rest of the blocks: it stores
   //in m_prev_size the absolute distance to the end block
1171    BOOST_ASSERT(end_segment_block->m_allocated);
1172    block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
1173       (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
1174    (void)first_block;
1175    BOOST_ASSERT(first_block->m_prev_allocated == 1);
1176    BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
1177    BOOST_ASSERT(end_segment_block > first_block);
1178    return first_block;
1179 }
1180 
1181 
1182 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1183 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1184    rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
1185       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1186 {
1187    return move_detail::force_ptr<block_ctrl*>
1188       (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
1189 }
1190 
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   bool allocated = block->m_allocated != 0;
   #ifndef NDEBUG
   if(block != priv_end_block()){
      block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
      (void)next_block_prev_allocated;
      BOOST_ASSERT(allocated == next_block_prev_allocated);
   }
   #endif
   return allocated;
}
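//Returns true if the block that precedes 'block' in memory is allocated. In
//debug builds a free predecessor is also checked for size consistency.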
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   if(block->m_prev_allocated){
      return true;
   }
   else{
      #ifndef NDEBUG
      if(block != priv_first_block()){
         block_ctrl *prev = priv_prev_block(block);
         (void)prev;
         BOOST_ASSERT(!prev->m_allocated);
         BOOST_ASSERT(prev->m_size == block->m_prev_size);
      }
      #endif
      return false;
   }
}
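//Marks 'block' as allocated and mirrors the state into the next block's
//m_prev_allocated bit so both boundary tags stay consistent.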
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 1;
   move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}
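//Marks 'block' as free and propagates the state to the next block:
//m_prev_allocated is cleared and m_prev_size records this block's size,
//so the next block can reach its predecessor when coalescing.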
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   block_ctrl *next_block = priv_next_block(block);
   next_block->m_prev_allocated = 0;
   next_block->m_prev_size = block->m_size;
}
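//Carves 'nunits' units out of the free 'block' (already chosen by the caller
//from the free-block tree). If the remainder is big enough to hold a block
//control structure (>= BlockCtrlUnits), the block is split in two and the
//remainder is reinserted in the tree; otherwise the whole block is consumed.
//Sizes below are in Alignment units:
//
//   before: [ free, m_size = S                         ]
//   after:  [ allocated, m_size = nunits ][ free, m_size = S - nunits ]
//
//Returns the user buffer of the allocated block and reports the usable
//byte count through 'received_size'.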
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks: the first's size will be "nunits" and
      //the second's size "block->m_size - nunits"
      size_type block_old_size = block->m_size;
      block->m_size = nunits & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Now we have to update the data in the tree.
      //Use the position of the erased one as a hint
      m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
   }
   else if (block->m_size >= nunits){
      m_header.m_imultiset.erase(it_old);
   }
   else{
      BOOST_ASSERT(0);
      return 0;
   }
   //We need the block_ctrl for deallocation, so return only
   //the memory the user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Zero from the tree hook's offset to the end of the block control bytes
   std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);
   char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
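//Public deallocation entry point: null pointers are ignored, otherwise the
//segment mutex is taken and the work is forwarded to priv_deallocate.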
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}
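//Returns 'addr' to the segment. The freed block is coalesced with a free
//previous and/or next neighbour before being (re)inserted in the free-block
//tree, so no two adjacent free blocks are ever left in the tree:
//
//   [ free prev ][ freed ][ free next ]  ->  [ one merged free block ]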
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check that the alignment is right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *const next_block  = priv_next_block(block);
   const bool merge_with_prev    = !priv_is_prev_allocated(block);
   const bool merge_with_next    = !priv_is_allocated_block(next_block);

   //Merge logic. First update the block sizes, then fix the free-block tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous block is free
      if(merge_with_prev){
         //Get the previous block
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
      }
      //Merge if the next block is free
      if(merge_with_next){
         block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         m_header.m_imultiset.erase(next_it);
      }
   }
   priv_mark_as_free_block(block_to_insert);
   m_header.m_imultiset.insert(*block_to_insert);
}

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP