Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-05-11 08:24:34

0001 //////////////////////////////////////////////////////////////////////////////
0002 //
0003 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
0004 // Software License, Version 1.0. (See accompanying file
0005 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
0006 //
0007 // See http://www.boost.org/libs/interprocess for documentation.
0008 //
0009 //////////////////////////////////////////////////////////////////////////////
0010 
0011 #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
0012 #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
0013 
0014 #ifndef BOOST_CONFIG_HPP
0015 #  include <boost/config.hpp>
0016 #endif
0017 
0018 #
0019 #if defined(BOOST_HAS_PRAGMA_ONCE)
0020 #  pragma once
0021 #endif
0022 
0023 #include <boost/interprocess/detail/config_begin.hpp>
0024 #include <boost/interprocess/detail/workaround.hpp>
0025 
0026 // interprocess
0027 #include <boost/interprocess/containers/allocation_type.hpp>
0028 #include <boost/interprocess/exceptions.hpp>
0029 #include <boost/interprocess/interprocess_fwd.hpp>
0030 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
0031 #include <boost/interprocess/offset_ptr.hpp>
0032 #include <boost/interprocess/sync/scoped_lock.hpp>
0033 // interprocess/detail
0034 #include <boost/interprocess/detail/min_max.hpp>
0035 #include <boost/interprocess/detail/math_functions.hpp>
0036 #include <boost/interprocess/detail/type_traits.hpp>
0037 #include <boost/interprocess/detail/utilities.hpp>
0038 // container
0039 #include <boost/container/detail/multiallocation_chain.hpp>
0040 // container/detail
0041 #include <boost/container/detail/placement_new.hpp>
0042 // move/detail
0043 #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
0044 #include <boost/move/detail/force_ptr.hpp> //make_unsigned, alignment_of
0045 // intrusive
0046 #include <boost/intrusive/pointer_traits.hpp>
0047 #include <boost/intrusive/set.hpp>
0048 // other boost
0049 #include <boost/assert.hpp>
0050 // std
0051 #include <climits>
0052 #include <cstring>
0053 
0054 //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
0055 //to maintain ABI compatible with the original version
0056 //ABI had to be updated to fix compatibility issues when
0057 //sharing shared memory between 32 and 64 bit processes.
0058 
0059 //!\file
0060 //!Describes a best-fit algorithm based in an intrusive red-black tree used to allocate
0061 //!objects in shared memory. This class is intended as a base class for single segment
0062 //!and multi-segment implementations.
0063 
0064 namespace boost {
0065 namespace interprocess {
0066 
0067 //!This class implements an algorithm that stores the free nodes in a red-black tree
0068 //!to have logarithmic search/insert times.
0069 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0070 class rbtree_best_fit
0071 {
0072    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0073    //Non-copyable
0074    rbtree_best_fit();
0075    rbtree_best_fit(const rbtree_best_fit &);
0076    rbtree_best_fit &operator=(const rbtree_best_fit &);
0077 
0078    private:
0079    struct block_ctrl;
0080    typedef typename boost::intrusive::
0081       pointer_traits<VoidPointer>::template
0082          rebind_pointer<block_ctrl>::type                   block_ctrl_ptr;
0083 
0084    typedef typename boost::intrusive::
0085       pointer_traits<VoidPointer>::template
0086          rebind_pointer<char>::type                         char_ptr;
0087 
0088    #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
0089 
0090    public:
0091    //!Shared mutex family used for the rest of the Interprocess framework
0092    typedef MutexFamily        mutex_family;
0093    //!Pointer type to be used with the rest of the Interprocess framework
0094    typedef VoidPointer        void_pointer;
0095    typedef ipcdetail::basic_multiallocation_chain<VoidPointer>  multiallocation_chain;
0096 
0097    typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
0098    typedef typename boost::container::dtl::make_unsigned<difference_type>::type     size_type;
0099 
0100    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0101 
0102    private:
0103 
0104    typedef typename bi::make_set_base_hook
0105       < bi::void_pointer<VoidPointer>
0106       , bi::optimize_size<true>
0107       , bi::link_mode<bi::normal_link> >::type           TreeHook;
0108 
0109    struct SizeHolder
0110    {
           //!Mask covering the size bits of m_size: the top two bits of a
           //!size_type are reserved for the m_prev_allocated/m_allocated flags
0111       static const size_type size_mask = size_type(-1) >> 2;
0112       //!Previous block's memory size (including block_ctrl
0113       //!header) in Alignment units. This field (UsableByPreviousChunk bytes)
0114       //!is OVERWRITTEN by the previous block if allocated (m_prev_allocated)
0115       size_type m_prev_size;
0116       //!This block's memory size (including block_ctrl
0117       //!header) in Alignment units
0118       size_type m_size      :  sizeof(size_type)*CHAR_BIT - 2;
           //!1 if the previous (lower-address) block is allocated
0119       size_type m_prev_allocated :  1;
           //!1 if this block is allocated
0120       size_type m_allocated :  1;
0121    };
0122 
0123    //!Block control structure
0124    struct block_ctrl
0125       :  public SizeHolder
0126       //This tree hook is overwritten when this block is used
0127       , public TreeHook
0128    {
0129       block_ctrl()
0130       {
0131          this->SizeHolder::m_size = 0;
0132          this->SizeHolder::m_allocated = 0;
0133          this->SizeHolder::m_prev_allocated = 0;
0134       }
0135 
0136       friend bool operator<(const block_ctrl &a, const block_ctrl &b)
0137       {  return a.SizeHolder::m_size < b.SizeHolder::m_size;  }
0138 
0139       friend bool operator==(const block_ctrl &a, const block_ctrl &b)
0140       {  return a.SizeHolder::m_size == b.SizeHolder::m_size;  }
0141    };
0142 
       //!Transparent comparator to search the free-block tree by size alone
0143    struct size_block_ctrl_compare
0144    {
0145       bool operator()(size_type size, const block_ctrl &block) const
0146       {  return size < block.m_size;  }
0147 
0148       bool operator()(const block_ctrl &block, size_type size) const
0149       {  return block.m_size < size;  }
0150    };
0151 
0152    //!Shared mutex to protect memory allocate/deallocate
0153    typedef typename MutexFamily::mutex_type                       mutex_type;
0154    typedef typename bi::make_multiset
0155       <block_ctrl, bi::base_hook<TreeHook> >::type                Imultiset;
0156 
0157    typedef typename Imultiset::iterator                           imultiset_iterator;
0158    typedef typename Imultiset::const_iterator                     imultiset_const_iterator;
0159 
0160    //!This struct includes needed data and derives from
0161    //!mutex_type to allow EBO when using null mutex_type
0162    struct header_t : public mutex_type
0163    {
           //!Intrusive multiset of free blocks ordered by size (best-fit search)
0164       Imultiset            m_imultiset;
0165 
0166       //!The extra size required by the segment
0167       size_type            m_extra_hdr_bytes;
0168       //!Allocated bytes for internal checking
0169       size_type            m_allocated;
0170       //!The size of the memory segment
0171       size_type            m_size;
0172    }  m_header;
0173 
0174    friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;
0175 
0176    typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;
0177 
0178    public:
0179    #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
0180 
0181    //!Constructor. "size" is the total size of the managed memory segment,
0182    //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
0183    //!offset that the allocator should not use at all.
0184    rbtree_best_fit           (size_type size, size_type extra_hdr_bytes);
0185 
0186    //!Destructor.
0187    ~rbtree_best_fit();
0188 
0189    //!Obtains the minimum size needed by the algorithm
0190    static size_type get_min_size (size_type extra_hdr_bytes);
0191 
0192    //Functions for single segment management
0193 
0194    //!Allocates bytes, returns 0 if there is not more memory.
0195    //!Returned memory is aligned to Alignment bytes.
0196    void* allocate             (size_type nbytes);
0197 
0198    //!Deallocates previously allocated bytes
0199    void   deallocate(void* addr);
0200 
0201    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0202 
0203    //Experimental. Don't use
0204 
0205    //!Multiple element allocation, same size
0206    //!Experimental. Don't use
0207    void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
0208    {
0209       //-----------------------
0210       boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0211       //-----------------------
0212       algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
0213    }
0214 
0215    //!Multiple element allocation, different size
0216    //!Experimental. Don't use
0217    void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
0218    {
0219       //-----------------------
0220       boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0221       //-----------------------
0222       algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
0223    }
0224 
0225    //!Multiple element deallocation
0226    //!Experimental. Don't use
0227    void deallocate_many(multiallocation_chain &chain);
0228 
0229    template<class T>
0230    T* allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
0231       size_type& prefer_in_recvd_out_size, T*& reuse);
0232 
0233    void* raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
0234       size_type& prefer_in_recvd_out_size,
0235       void*& reuse_ptr, size_type sizeof_object = 1);
0236 
0237    #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
0238 
0239    //!Returns the size of the memory segment
0240    size_type get_size()  const;
0241 
0242    //!Returns the number of free bytes of the segment
0243    size_type get_free_memory()  const;
0244 
0245    //!Initializes to zero all the memory that's not in use.
0246    //!This function is normally used for security reasons.
0247    void zero_free_memory();
0248 
0249    //!Increases managed memory in
0250    //!extra_size bytes more
0251    void grow(size_type extra_size);
0252 
0253    //!Decreases managed memory as much as possible
0254    void shrink_to_fit();
0255 
0256    //!Returns true if all allocated memory has been deallocated
0257    bool all_memory_deallocated();
0258 
0259    //!Makes an internal sanity check
0260    //!and returns true if success
0261    bool check_sanity();
0262 
0263    //!Returns the size of the buffer previously allocated pointed by ptr
0264    size_type size(const void *ptr) const;
0265 
0266    //!Allocates aligned bytes, returns 0 if there is not more memory.
0267    //!Alignment must be power of 2
0268    void* allocate_aligned     (size_type nbytes, size_type alignment);
0269 
0270    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0271    private:
       //!Byte offset (from this_ptr) where the first block control starts
0272    static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);
0273 
0274    block_ctrl *priv_first_block();
0275 
0276    block_ctrl *priv_end_block();
0277 
0278    void* priv_allocation_command(boost::interprocess::allocation_type command,   size_type limit_size,
0279                         size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);
0280 
0281 
0282    //!Real allocation algorithm with min allocation option
0283    void * priv_allocate( boost::interprocess::allocation_type command
0284                        , size_type limit_size, size_type &prefer_in_recvd_out_size
0285                        , void *&reuse_ptr, size_type backwards_multiple = 1);
0286 
0287    //!Obtains the block control structure of the user buffer
0288    static block_ctrl *priv_get_block(const void *ptr);
0289 
0290    //!Obtains the pointer returned to the user from the block control
0291    static void *priv_get_user_buffer(const block_ctrl *block);
0292 
0293    //!Returns the number of total units that a user buffer
0294    //!of "userbytes" bytes really occupies (including header)
0295    static size_type priv_get_total_units(size_type userbytes);
0296 
0297    //!Real expand function implementation
0298    bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);
0299 
0300    //!Real expand to both sides implementation
0301    void* priv_expand_both_sides(boost::interprocess::allocation_type command
0302                                ,size_type min_size
0303                                ,size_type &prefer_in_recvd_out_size
0304                                ,void *reuse_ptr
0305                                ,bool only_preferred_backwards
0306                                ,size_type backwards_multiple);
0307 
0308    //!Returns true if the previous block is allocated
0309    bool priv_is_prev_allocated(block_ctrl *ptr);
0310 
0311    //!Get a pointer of the "end" block from the first block of the segment
0312    static block_ctrl * priv_end_block(block_ctrl *first_segment_block);
0313 
0314    //!Get a pointer of the "first" block from the end block of the segment
0315    static block_ctrl * priv_first_block(block_ctrl *end_segment_block);
0316 
0317    //!Get pointer of the previous block (previous block must be free)
0318    static block_ctrl * priv_prev_block(block_ctrl *ptr);
0319 
0320    //!Get a pointer to the next block, using this block's stored size
0321    static block_ctrl * priv_next_block(block_ctrl *ptr);
0322 
0323    //!Returns true if this block is allocated (not free)
0324    bool priv_is_allocated_block(block_ctrl *ptr);
0325 
0326    //!Marks the block as allocated
0327    void priv_mark_as_allocated_block(block_ctrl *ptr);
0328 
0329    //!Marks a newly carved block as allocated (forwards to priv_mark_as_allocated_block)
0330    void priv_mark_new_allocated_block(block_ctrl *ptr)
0331    {  return priv_mark_as_allocated_block(ptr); }
0332 
0333    //!Marks the block as free
0334    void priv_mark_as_free_block(block_ctrl *ptr);
0335 
0336    //!Checks if block has enough memory and splits/unlinks the block
0337    //!returning the address to the users
0338    void* priv_check_and_allocate(size_type units
0339                                 ,block_ctrl* block
0340                                 ,size_type &received_size);
0341    //!Real deallocation algorithm
0342    void priv_deallocate(void *addr);
0343 
0344    //!Makes a new memory portion available for allocation
0345    void priv_add_segment(void *addr, size_type size);
0346 
0347    public:
0348 
       //!Alignment of returned buffers: MemAlignment if non-zero, otherwise
       //!the platform's maximal fundamental alignment
0349    static const size_type Alignment = !MemAlignment
0350       ? size_type(::boost::container::dtl::alignment_of
0351                   < ::boost::container::dtl::max_align_t>::value)
0352       : size_type(MemAlignment)
0353       ;
0354 
0355    private:
0356    //Due to embedded bits in size, Alignment must be at least 4
0357    BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= 4));
0358    //Due to rbtree size optimizations, Alignment must have at least pointer alignment
0359    BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
0360    static const size_type AlignmentMask = (Alignment - 1);
0361    static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
0362    static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
0363    static const size_type AllocatedCtrlBytes  = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
0364    static const size_type AllocatedCtrlUnits  = AllocatedCtrlBytes/Alignment;
0365    static const size_type EndCtrlBlockBytes   = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
0366    static const size_type EndCtrlBlockUnits   = EndCtrlBlockBytes/Alignment;
0367    static const size_type UsableByPreviousChunk   = sizeof(size_type);
0368 
0369    //Make sure the maximum alignment is power of two
0370    BOOST_INTERPROCESS_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
0371    #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
0372    public:
       //!Per-allocation bookkeeping overhead visible to users
0373    static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
0374 };
0375 
0376 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0377 
0378 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0379 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
0380        rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
0381    ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
0382 {
0383    size_type uint_this      = (std::size_t)this_ptr;
0384    size_type main_hdr_end   = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
0385    size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
0386    size_type block1_off = aligned_main_hdr_end -  uint_this;
0387    algo_impl_t::assert_alignment(aligned_main_hdr_end);
0388    algo_impl_t::assert_alignment(uint_this + block1_off);
0389    return block1_off;
0390 }
0391 
0392 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0393 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0394    priv_add_segment(void *addr, size_type segment_size)
0395 {
0396    //Check alignment
0397    algo_impl_t::check_alignment(addr);
0398    //Check size
0399    BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
0400 
0401    //Initialize the first big block and the "end" node
0402    block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
0403    first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
0404    BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
0405 
0406    //The "end" node is just a node of size 0 with the "end" bit set
0407    SizeHolder *end_block =
0408       ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;
0409 
0410    //This will overwrite the prev part of the "end" node
0411    priv_mark_as_free_block (first_big_block);
0412    #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
0413    first_big_block->m_prev_size = end_block->m_size =
0414       size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment) & block_ctrl::size_mask;
0415    #else
0416    first_big_block->m_prev_size = end_block->m_size =
0417       size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
0418    #endif
0419    end_block->m_allocated = 1;
0420    first_big_block->m_prev_allocated = 1;
0421 
0422    BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
0423    BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
0424    BOOST_ASSERT(priv_first_block() == first_big_block);
0425    BOOST_ASSERT(priv_end_block() == end_block);
0426 
0427    //Some check to validate the algorithm, since it makes some assumptions
0428    //to optimize the space wasted in bookkeeping:
0429 
0430    //Check that the sizes of the header are placed before the rbtree
0431    BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
0432           < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
0433    //Insert it in the intrusive containers
0434    m_header.m_imultiset.insert(*first_big_block);
0435 }
0436 
0437 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0438 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
0439        rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
0440    ::priv_first_block()
0441 {
0442    const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
0443    return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
0444 }
0445 
0446 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0447 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
0448        rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
0449    ::priv_end_block()
0450 {
0451    const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
0452    const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
0453    block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
0454       (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
0455    return end_block;
0456 }
0457 
0458 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0459 inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0460    rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
0461 {
0462    //Initialize the header
0463    m_header.m_allocated       = 0;
0464    m_header.m_size            = segment_size;
0465    m_header.m_extra_hdr_bytes = extra_hdr_bytes;
0466 
0467    //Now write calculate the offset of the first big block that will
0468    //cover the whole segment
0469    BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
0470    size_type block1_off  = priv_first_block_offset_from_this(this, extra_hdr_bytes);
0471    priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
0472 }
0473 
0474 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0475 inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
0476 {
     //Intentionally empty: the algorithm keeps all its state inside the managed
     //segment itself, so there is nothing to release here. The asserts below are
     //kept disabled because destroying the segment with live allocations
     //(i.e. a leak by the segment's users) must not abort the process.
0477    //There is a memory leak!
0478 //   BOOST_ASSERT(m_header.m_allocated == 0);
0479 //   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
0480 }
0481 
0482 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0483 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
0484 {
0485    //Get the address of the first block
0486    block_ctrl *first_block = priv_first_block();
0487    block_ctrl *old_end_block = priv_end_block();
        //Byte offset (from this) one past the old end sentinel
0488    size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
0489                                     reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
0490 
0491    //Update managed buffer's size
0492    m_header.m_size += extra_size;
0493 
0494    //We need at least BlockCtrlBytes blocks to create a new block
0495    if((m_header.m_size - old_border_offset) < BlockCtrlBytes){
0496       return;
0497    }
0498 
0499    //Now create a new block between the old end and the new end
0500    size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
0501    block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
0502       (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
0503 
0504    //the last and first block are special:
0505    //new_end_block->m_size & first_block->m_prev_size store the absolute value
0506    //between them
0507    new_end_block->m_allocated = 1;
0508    #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
0509    new_end_block->m_size      = size_type(reinterpret_cast<char*>(first_block) -
0510                                           reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
0511    #else
0512    new_end_block->m_size      = size_type(reinterpret_cast<char*>(new_end_block) -
0513                                           reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
0514    #endif
0515    first_block->m_prev_size = new_end_block->m_size;
0516    first_block->m_prev_allocated = 1;
0517    BOOST_ASSERT(new_end_block == priv_end_block());
0518 
0519    //The old end block is the new block
0520    block_ctrl *new_block = old_end_block;
0521    new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
0522                                  reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
0523    BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
        //Temporarily account the grown area as allocated...
0524    priv_mark_as_allocated_block(new_block);
0525    BOOST_ASSERT(priv_next_block(new_block) == new_end_block);
0526 
0527    m_header.m_allocated += (size_type)new_block->m_size*Alignment;
0528 
0529    //Now deallocate the newly created block
        //...so that priv_deallocate frees it and coalesces it with any
        //adjacent free block, inserting the result into the free tree
0530    this->priv_deallocate(priv_get_user_buffer(new_block));
0531 }
0532 
0533 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0534 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
0535 {
0536    //Get the address of the first block
0537    block_ctrl *first_block = priv_first_block();
0538    algo_impl_t::assert_alignment(first_block);
0539 
0540    //block_ctrl *old_end_block = priv_end_block(first_block);
0541    block_ctrl *old_end_block = priv_end_block();
0542    algo_impl_t::assert_alignment(old_end_block);
0543    size_type old_end_block_size = old_end_block->m_size;
0544 
0545    void *unique_buffer = 0;
0546    block_ctrl *last_block;
0547    //Check if no memory is allocated between the first and last block
0548    if(priv_next_block(first_block) == old_end_block){
0549       //If so check if we can allocate memory
          //A zero-byte allocation carves a minimal block at the front so the
          //remainder of the segment becomes the trailing free block to cut off
0550       size_type ignore_recvd = 0;
0551       void *ignore_reuse = 0;
0552       unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
0553       //If not, return, we can't shrink
0554       if(!unique_buffer)
0555          return;
0556       //If we can, mark the position just after the new allocation as the new end
0557       algo_impl_t::assert_alignment(unique_buffer);
0558       block_ctrl *unique_block = priv_get_block(unique_buffer);
0559       BOOST_ASSERT(priv_is_allocated_block(unique_block));
0560       algo_impl_t::assert_alignment(unique_block);
0561       last_block = priv_next_block(unique_block);
0562       BOOST_ASSERT(!priv_is_allocated_block(last_block));
0563       algo_impl_t::assert_alignment(last_block);
0564    }
0565    else{
0566       //If memory is allocated, check if the last block is allocated
0567       if(priv_is_prev_allocated(old_end_block))
0568          return;
0569       //If not, mark last block after the free block
0570       last_block = priv_prev_block(old_end_block);
0571    }
0572 
0573    size_type last_block_size = last_block->m_size;
0574 
0575    //Erase block from the free tree, since we will erase it
0576    m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));
0577 
        //New segment border: the trailing free block's start plus the
        //bytes needed for the new end sentinel
0578    size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
0579                                        reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
0580 
        //The removed free block's control structure is reused as the new end sentinel
0581    block_ctrl *new_end_block = last_block;
0582    algo_impl_t::assert_alignment(new_end_block);
0583 
0584    //Write new end block attributes
0585    #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
0586    new_end_block->m_size = 
0587       size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
0588    first_block->m_prev_size = new_end_block->m_size;
0589    #else
0590    new_end_block->m_size =
0591       size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
0592    first_block->m_prev_size = new_end_block->m_size;
0593    #endif
0594 
0595    new_end_block->m_allocated = 1;
        //Silence unused-variable warnings when BOOST_ASSERT compiles away
0596    (void)last_block_size;
0597    (void)old_end_block_size;
0598    BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));
0599 
0600    //Update managed buffer's size
0601    m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
0602    BOOST_ASSERT(priv_end_block() == new_end_block);
        //Release the temporary front allocation made above, if any
0603    if(unique_buffer)
0604       priv_deallocate(unique_buffer);
0605 }
0606 
0607 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0608 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
0609 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size()  const
0610 {  return m_header.m_size;  }
0611 
0612 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0613 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
0614 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory()  const
0615 {
0616    return m_header.m_size - m_header.m_allocated -
0617       priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
0618 }
0619 
0620 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0621 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
0622 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0623    get_min_size (size_type extra_hdr_bytes)
0624 {
0625    return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
0626            algo_impl_t::ceil_units(extra_hdr_bytes) +
0627            BlockCtrlUnits + EndCtrlBlockUnits)*Alignment;
0628 }
0629 
0630 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0631 inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0632     all_memory_deallocated()
0633 {
0634    //-----------------------
0635    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0636    //-----------------------
0637    size_type block1_off  =
0638       priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
0639 
        //Everything is deallocated when: no bytes are accounted as allocated,
        //the free tree holds exactly one node (begin() != end() but the node
        //after begin() is end()), and that single free block spans the whole
        //allocatable area (segment minus header offset minus end sentinel)
0640    return m_header.m_allocated == 0 &&
0641       m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
0642        (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
0643        && m_header.m_imultiset.begin()->m_size ==
0644          (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
0645 }
0646 
0647 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0648 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0649     check_sanity()
0650 {
0651    //-----------------------
0652    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0653    //-----------------------
0654    imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
0655 
0656    size_type free_memory = 0;
0657 
0658    //Iterate through all blocks obtaining their size
0659    for(; ib != ie; ++ib){
0660       free_memory += (size_type)ib->m_size*Alignment;
0661       if(!algo_impl_t::check_alignment(&*ib))
0662          return false;
0663    }
0664 
0665    //Check allocated bytes are less than size
0666    if(m_header.m_allocated > m_header.m_size){
0667       return false;
0668    }
0669 
0670    size_type block1_off  =
0671       priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
0672 
0673    //Check free bytes are less than size
0674    if(free_memory > (m_header.m_size - block1_off)){
0675       return false;
0676    }
0677    return true;
0678 }
0679 
0680 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0681 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0682    allocate(size_type nbytes)
0683 {
0684    //-----------------------
0685    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0686    //-----------------------
0687    size_type ignore_recvd = nbytes;
0688    void *ignore_reuse = 0;
0689    return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
0690 }
0691 
0692 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0693 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0694    allocate_aligned(size_type nbytes, size_type alignment)
0695 {
0696    //-----------------------
0697    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0698    //-----------------------
0699    return algo_impl_t::allocate_aligned(this, nbytes, alignment);
0700 }
0701 
0702 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0703 template<class T>
0704 inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0705    allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0706                         size_type &prefer_in_recvd_out_size, T *&reuse)
0707 {
0708    void* raw_reuse = reuse;
0709    void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
0710    reuse = static_cast<T*>(raw_reuse);
0711    BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
0712    return static_cast<T*>(ret);
0713 }
0714 
0715 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0716 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0717    raw_allocation_command  (boost::interprocess::allocation_type command,   size_type limit_objects,
0718                         size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
0719 {
0720    size_type const preferred_objects = prefer_in_recvd_out_objects;
0721    if(!sizeof_object)
0722       return reuse_ptr = 0, static_cast<void*>(0);
0723    if(command & boost::interprocess::try_shrink_in_place){
0724       if(!reuse_ptr)  return static_cast<void*>(0);
0725       const bool success = algo_impl_t::try_shrink
0726          ( this, reuse_ptr, limit_objects*sizeof_object
0727          , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
0728       prefer_in_recvd_out_objects /= sizeof_object;
0729       return success ? reuse_ptr : 0;
0730    }
0731    else{
0732       return priv_allocation_command
0733          (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
0734    }
0735 }
0736 
0737 
0738 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0739 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0740    priv_allocation_command (boost::interprocess::allocation_type command,   size_type limit_size,
0741                        size_type &prefer_in_recvd_out_size,
0742                        void *&reuse_ptr, size_type sizeof_object)
0743 {
0744    void* ret;
0745    size_type const preferred_size = prefer_in_recvd_out_size;
0746    size_type const max_count = m_header.m_size/sizeof_object;
0747    if(limit_size > max_count || preferred_size > max_count){
0748       return reuse_ptr = 0, static_cast<void*>(0);
0749    }
0750    size_type l_size = limit_size*sizeof_object;
0751    size_type p_size = preferred_size*sizeof_object;
0752    size_type r_size;
0753    {
0754       //-----------------------
0755       boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0756       //-----------------------
0757       ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
0758    }
0759    prefer_in_recvd_out_size = r_size/sizeof_object;
0760    return ret;
0761 }
0762 
0763 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0764 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
0765 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0766    size(const void *ptr) const
0767 {
0768    //We need no synchronization since this block's size is not going
0769    //to be modified by anyone else
0770    //Obtain the real size of the block
0771    return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
0772 }
0773 
0774 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0775 inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
0776 {
0777    //-----------------------
0778    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0779    //-----------------------
0780    imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
0781 
0782    //Iterate through all blocks obtaining their size
0783    while(ib != ie){
0784       //Just clear user the memory part reserved for the user
0785       volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
0786       size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
0787       while(s--){
0788          *ptr++ = 0;
0789       }
0790 
0791       //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
0792       //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
0793       //           , 0
0794       //           , ib->m_size*Alignment - BlockCtrlBytes);
0795       ++ib;
0796    }
0797 }
0798 
//Tries to expand the existing allocation "reuse_ptr" forward (into the next
//free block, via priv_expand) and/or backwards (into the previous free
//block), according to the flags in "command".
//min_size is the minimum acceptable usable size; prefer_in_recvd_out_size
//carries the preferred usable size in and the obtained usable size out.
//If only_preferred_backwards is true, backwards expansion is attempted only
//for the preferred size (not for min_size). backwards_multiple, when
//non-zero, forces the backwards growth to be a multiple of that value.
//Returns the (possibly displaced) user pointer on success, 0 on failure.
//Note: must be called with the segment lock held (callers lock).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      //Try first a pure forward expansion; if it reaches min_size we're done
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      //No forward expansion requested: succeed if the current size is enough
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   //Requested sizes must honor the backwards multiple, if any
   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      //Compute how many extra bytes must be taken from the previous block so
      //that the new (displaced) user pointer keeps the multiple constraint
      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            //Case 1: the previous block is big enough to be split. The tail of
            //the previous block becomes the new (allocated) block start.
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Free old previous buffer
            new_block->m_size =
               (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            //Shrink the previous block to the space left before new_block
            prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
                                           reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Case 2: absorb the whole previous block (no room for a split).
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}
0940 
0941 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0942 inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0943    deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
0944 {
0945    //-----------------------
0946    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0947    //-----------------------
0948    algo_impl_t::deallocate_many(this, chain);
0949 }
0950 
//Core allocation routine (lock must already be held by the caller).
//Handles shrink_in_place, in-place expansion (forward/backwards) and new
//allocations from the free blocks tree, in that order of preference.
//limit_size is the minimum acceptable size, prefer_in_recvd_out_size the
//preferred size in / received size out. reuse_ptr is both the candidate
//buffer to expand and an output: it's set to 0 when no reuse happened.
//Returns the user buffer or 0 on failure.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
               ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      //Shrinking needs an existing buffer
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   //The minimum can't exceed the preferred size
   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units of the minimum acceptable request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);

   //Expand in place, first trying to obtain the full preferred size
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      //Best-fit: the smallest free block that holds preferred_units
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      //No block fits the preferred size: take the biggest block (the one
      //just before end()) if it at least satisfies the minimum
      if(it != m_header.m_imultiset.begin()&&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }


   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
1011 
1012 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1013 inline
1014 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1015    rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
1016 {
1017    return const_cast<block_ctrl*>
1018       (move_detail::force_ptr<const block_ctrl*>
1019          (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
1020 }
1021 
1022 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1023 inline
1024 void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1025       priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1026 {  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);   }
1027 
1028 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1029 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
1030 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1031    priv_get_total_units(size_type userbytes)
1032 {
1033    if(userbytes < UsableByPreviousChunk)
1034       userbytes = UsableByPreviousChunk;
1035    size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
1036    if(units < BlockCtrlUnits) units = BlockCtrlUnits;
1037    return units;
1038 }
1039 
//Tries to expand the allocated block "ptr" in place by merging it with the
//next block when that one is free. min_size is the minimum required usable
//bytes; prefer_in_recvd_out_size carries the preferred usable bytes in and
//the obtained usable bytes out. Returns true when the block ends up with at
//least min_size usable bytes. Lock must be held by the caller.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   //Already big enough: nothing to do
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;

   //Now translate it to Alignment units
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   //If the next block is allocated we can't grow forward: succeed only if
   //the current size already satisfies min_size
   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);

   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;

   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;

   //Even a full merge is too small: report the merged capacity and fail
   if(merged_user_units < min_user_units){
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //These are total units of the merged block (supposing the next block can be split)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;

   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >=  BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks, the first one will be merged and
      //the second's size will be the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;

      //Check if we we need to update the old next block in the free blocks tree
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old if needed and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      m_header.m_imultiset.erase(old_next_block_it);

      //This is the remaining block
      block_ctrl *rem_block = 
         ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
      rem_block->m_size = rem_units & block_ctrl::size_mask;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      m_header.m_imultiset.insert(*rem_block);

      //Write the new length
      block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the new length
      block->m_size = merged_units & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}
1135 
1136 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1137 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1138    rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
1139       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1140 {
1141    BOOST_ASSERT(!ptr->m_prev_allocated);
1142    return move_detail::force_ptr<block_ctrl*>
1143       (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
1144 }
1145 
1146 
1147 
1148 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1149 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1150    rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
1151       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
1152 {
1153    //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
1154    //distance with the end block
1155    BOOST_ASSERT(first_segment_block->m_prev_allocated);
1156    block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
1157       (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
1158    (void)end_block;
1159    BOOST_ASSERT(end_block->m_allocated == 1);
1160    BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
1161    BOOST_ASSERT(end_block > first_segment_block);
1162    return end_block;
1163 }
1164 
1165 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1166 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1167    rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
1168       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
1169 {
1170    //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
1171    //distance with the end block
1172    BOOST_ASSERT(end_segment_block->m_allocated);
1173    block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
1174       (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
1175    (void)first_block;
1176    BOOST_ASSERT(first_block->m_prev_allocated == 1);
1177    BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
1178    BOOST_ASSERT(end_segment_block > first_block);
1179    return first_block;
1180 }
1181 
1182 
1183 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1184 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1185    rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
1186       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1187 {
1188    return move_detail::force_ptr<block_ctrl*>
1189       (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
1190 }
1191 
1192 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1193 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
1194       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1195 {
1196    bool allocated = block->m_allocated != 0;
1197    #ifndef NDEBUG
1198    if(block != priv_end_block()){
1199       block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
1200          (reinterpret_cast<char*>(block) + block->m_size*Alignment);
1201       bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
1202       (void)next_block_prev_allocated;
1203       BOOST_ASSERT(allocated == next_block_prev_allocated);
1204    }
1205    #endif
1206    return allocated;
1207 }
1208 
1209 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1210 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
1211       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1212 {
1213    if(block->m_prev_allocated){
1214       return true;
1215    }
1216    else{
1217       #ifndef NDEBUG
1218       if(block != priv_first_block()){
1219          block_ctrl *prev = priv_prev_block(block);
1220          (void)prev;
1221          BOOST_ASSERT(!prev->m_allocated);
1222          BOOST_ASSERT(prev->m_size == block->m_prev_size);
1223       }
1224       #endif
1225       return false;
1226    }
1227 }
1228 
1229 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1230 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
1231       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1232 {
1233    block->m_allocated = 1;
1234    move_detail::force_ptr<block_ctrl*>
1235       (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
1236 }
1237 
1238 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1239 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
1240       (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1241 {
1242    block->m_allocated = 0;
1243    block_ctrl *next_block = priv_next_block(block);
1244    next_block->m_prev_allocated = 0;
1245    next_block->m_prev_size = block->m_size;
1246 }
1247 
//Carves an allocation of "nunits" total units out of the free block
//"block", which must currently be linked in the free blocks tree. If the
//block is big enough, the remainder is reinserted in the tree as a new
//free block; otherwise the whole block is consumed. Stores the obtained
//usable bytes in received_size and returns the user buffer.
//Lock must be held by the caller.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   //Splitting is only worthwhile if the remainder can hold a full block
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units" and
      //the second's size "block->m_size-units"
      size_type block_old_size = block->m_size;
      block->m_size = nunits & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Now we have to update the data in the tree.
      //Use the position of the erased one as a hint
      m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
   }
   else if (block->m_size >= nunits){
      //Not enough room to split: consume the whole block
      m_header.m_imultiset.erase(it_old);
   }
   else{
      //The caller passed a block that is too small: programming error
      BOOST_ASSERT(0);
      return 0;
   }
   //We need block_ctrl for deallocation stuff, so
   //return memory user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size =  ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
1305 
1306 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1307 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
1308 {
1309    if(!addr)   return;
1310    //-----------------------
1311    boost::interprocess::scoped_lock<mutex_type> guard(m_header);
1312    //-----------------------
1313    return this->priv_deallocate(addr);
1314 }
1315 
//Returns the block owning "addr" to the free blocks tree, coalescing it
//with the previous and/or next block when those are free. Lock must be
//held by the caller. Null pointers are ignored.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The blocks must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *const next_block  = priv_next_block(block);
   const bool merge_with_prev    = !priv_is_prev_allocated(block);
   const bool merge_with_next    = !priv_is_allocated_block(next_block);

   //Merge logic. First just update block sizes, then fix free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         //The previous block's size changed, so it must leave the tree
         //and be reinserted below with its new size
         m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         m_header.m_imultiset.erase(next_it);
      }
   }
   //Mark the (possibly merged) block free and publish it in the tree
   priv_mark_as_free_block(block_to_insert);
   m_header.m_imultiset.insert(*block_to_insert);
}
1364 
1365 #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
1366 
1367 }  //namespace interprocess {
1368 }  //namespace boost {
1369 
1370 #include <boost/interprocess/detail/config_end.hpp>
1371 
1372 #endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP