0001 //////////////////////////////////////////////////////////////////////////////
0002 //
0003 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
0004 // Software License, Version 1.0. (See accompanying file
0005 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
0006 //
0007 // See http://www.boost.org/libs/interprocess for documentation.
0008 //
0009 //////////////////////////////////////////////////////////////////////////////
0010 
0011 #ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
0012 #define BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
0013 
0014 #ifndef BOOST_CONFIG_HPP
0015 #  include <boost/config.hpp>
0016 #endif
0017 #
0018 #if defined(BOOST_HAS_PRAGMA_ONCE)
0019 #  pragma once
0020 #endif
0021 
0022 #include <boost/interprocess/detail/config_begin.hpp>
0023 #include <boost/interprocess/detail/workaround.hpp>
0024 
0025 #include <boost/intrusive/pointer_traits.hpp>
0026 
0027 #include <boost/interprocess/interprocess_fwd.hpp>
0028 #include <boost/interprocess/containers/allocation_type.hpp>
0029 #include <boost/container/detail/multiallocation_chain.hpp>
0030 #include <boost/interprocess/offset_ptr.hpp>
0031 #include <boost/interprocess/sync/interprocess_mutex.hpp>
0032 #include <boost/interprocess/exceptions.hpp>
0033 #include <boost/interprocess/detail/utilities.hpp>
0034 #include <boost/interprocess/detail/min_max.hpp>
0035 #include <boost/interprocess/detail/type_traits.hpp>
0036 #include <boost/interprocess/sync/scoped_lock.hpp>
0037 #include <boost/intrusive/pointer_traits.hpp>
0038 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
0039 #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
0040 #include <boost/move/detail/force_ptr.hpp>
0041 #include <boost/intrusive/detail/minimal_pair_header.hpp>
0042 #include <cstring>
0043 #include <boost/assert.hpp>
0044 
0045 //!\file
0046 //!Describes the sequential fit algorithm used to allocate objects in shared memory.
0047 //!This class is intended as a base class for single segment and multi-segment
0048 //!implementations.
0049 
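/*
   Editor's note: an illustrative usage sketch, not part of this header.
   simple_seq_fit_impl is normally not used directly; it is the engine behind
   boost::interprocess::simple_seq_fit, which can be plugged into a managed
   memory segment. Names such as "SeqFitDemo" are arbitrary.

      #include <boost/interprocess/managed_shared_memory.hpp>
      #include <boost/interprocess/mem_algo/simple_seq_fit.hpp>
      #include <boost/interprocess/indexes/iset_index.hpp>

      namespace bip = boost::interprocess;

      //basic_managed_shared_memory is parameterized on the memory algorithm;
      //simple_seq_fit is the public front-end built on simple_seq_fit_impl.
      typedef bip::basic_managed_shared_memory
         < char
         , bip::simple_seq_fit<bip::mutex_family>
         , bip::iset_index
         > managed_shm_t;

      int main()
      {
         bip::shared_memory_object::remove("SeqFitDemo");
         managed_shm_t segment(bip::create_only, "SeqFitDemo", 65536);
         int *p = segment.construct<int>("answer")(42);   //allocate + construct
         segment.destroy_ptr(p);                          //deallocate
         bip::shared_memory_object::remove("SeqFitDemo");
         return 0;
      }
*/
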
0050 namespace boost {
0051 namespace interprocess {
0052 namespace ipcdetail {
0053 
0054 //!This class implements the simple sequential fit algorithm with a singly
0055 //!linked list of free buffers.
0056 //!This class is intended as a base class for single segment and multi-segment
0057 //!implementations.
0058 template<class MutexFamily, class VoidPointer>
0059 class simple_seq_fit_impl
0060 {
0061    //Non-copyable
0062    simple_seq_fit_impl();
0063    simple_seq_fit_impl(const simple_seq_fit_impl &);
0064    simple_seq_fit_impl &operator=(const simple_seq_fit_impl &);
0065 
0066    typedef typename boost::intrusive::
0067       pointer_traits<VoidPointer>::template
0068          rebind_pointer<char>::type                         char_ptr;
0069 
0070    public:
0071 
0072    //!Shared interprocess_mutex family used for the rest of the Interprocess framework
0073    typedef MutexFamily        mutex_family;
0074    //!Pointer type to be used with the rest of the Interprocess framework
0075    typedef VoidPointer        void_pointer;
0076    typedef boost::container::dtl::
0077       basic_multiallocation_chain<VoidPointer>     multiallocation_chain;
0078 
0079    typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
0080    typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;
0081 
0082 
0083    private:
0084    class block_ctrl;
0085    friend class block_ctrl;
0086 
0087    typedef typename boost::intrusive::
0088       pointer_traits<VoidPointer>::template
0089          rebind_pointer<block_ctrl>::type                   block_ctrl_ptr;
0090 
0091    //!Block control structure
0092    class block_ctrl
0093    {
0094       public:
0095       static const size_type size_mask = size_type(-1);
0096       //!Offset pointer to the next block.
0097       block_ctrl_ptr m_next;
0098       //!This block's memory size (including block_ctrl
0099       //!header) in Alignment units
0100       size_type    m_size;
0101 
0102       size_type get_user_bytes() const
0103       {  return this->m_size*Alignment - BlockCtrlBytes; }
0104 
0105       size_type get_total_bytes() const
0106       {  return this->m_size*Alignment; }
0107    };
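   /*
      Editor's note: a worked example of the size accounting above, assuming
      (hypothetically) Alignment == 16 and BlockCtrlBytes == 16, i.e.
      BlockCtrlUnits == 1:

         block_ctrl b;
         b.m_size = 4;                      //4 Alignment units in total
         //b.get_total_bytes() == 4*16      == 64 bytes (header + payload)
         //b.get_user_bytes()  == 4*16 - 16 == 48 bytes usable by the caller
   */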
0108 
0109    //!Shared interprocess_mutex to protect memory allocation/deallocation
0110    typedef typename MutexFamily::mutex_type        interprocess_mutex;
0111 
0112    //!This struct includes the needed data and derives from
0113    //!interprocess_mutex to allow EBO when using a null interprocess_mutex
0114    struct header_t : public interprocess_mutex
0115    {
0116       //!Root of the free block list; its m_next points to the first free block
0117       block_ctrl        m_root;
0118       //!Allocated bytes for internal checking
0119       size_type         m_allocated;
0120       //!The size of the memory segment
0121       size_type         m_size;
0122       //!The extra size required by the segment
0123       size_type         m_extra_hdr_bytes;
0124    }  m_header;
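   /*
      Editor's note: a minimal sketch (not part of this header) of why header_t
      derives from interprocess_mutex instead of holding it as a member: with an
      empty mutex type (e.g. a null mutex family) the empty base optimization
      (EBO) removes the mutex's storage cost, while a member would still occupy
      at least one byte plus padding.

         struct empty_mutex {};            //stand-in for a null mutex type

         struct header_as_member { empty_mutex m; std::size_t a, b, c; };
         struct header_as_base : empty_mutex { std::size_t a, b, c; };

         //Typically sizeof(header_as_base) == 3*sizeof(std::size_t),
         //while sizeof(header_as_member) is strictly larger.
   */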
0125 
0126    friend class ipcdetail::memory_algorithm_common<simple_seq_fit_impl>;
0127 
0128    typedef ipcdetail::memory_algorithm_common<simple_seq_fit_impl> algo_impl_t;
0129 
0130    public:
0131    //!Constructor. "size" is the total size of the managed memory segment,
0132    //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(simple_seq_fit_impl)
0133    //!offset that the allocator should not use at all.
0134    simple_seq_fit_impl           (size_type size, size_type extra_hdr_bytes);
0135 
0136    //!Destructor
0137    ~simple_seq_fit_impl();
0138 
0139    //!Obtains the minimum size needed by the algorithm
0140    static size_type get_min_size (size_type extra_hdr_bytes);
0141 
0142    //Functions for single segment management
0143 
0144    //!Allocates bytes, returns 0 if there is no more memory
0145    void* allocate             (size_type nbytes);
0146 
0147    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0148 
0149    //!Multiple element allocation, same size
0150    void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
0151    {
0152       //-----------------------
0153       boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0154       //-----------------------
0155       algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
0156    }
0157 
0158    //!Multiple element allocation, different size
0159    void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
0160    {
0161       //-----------------------
0162       boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0163       //-----------------------
0164       algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
0165    }
0166 
0167    //!Multiple element deallocation
0168    void deallocate_many(multiallocation_chain &chain);
0169 
0170    #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
0171 
0172    //!Deallocates previously allocated bytes
0173    void   deallocate          (void *addr);
0174 
0175    //!Returns the size of the memory segment
0176    size_type get_size()  const;
0177 
0178    //!Returns the number of free bytes of the memory segment
0179    size_type get_free_memory()  const;
0180 
0181    //!Increases the managed memory by extra_size bytes
0182    void grow(size_type extra_size);
0183 
0184    //!Decreases managed memory as much as possible
0185    void shrink_to_fit();
0186 
0187    //!Returns true if all allocated memory has been deallocated
0188    bool all_memory_deallocated();
0189 
0190    //!Makes an internal sanity check and returns true on success
0191    bool check_sanity();
0192 
0193    //!Initializes to zero all the memory that's not in use.
0194    //!This function is normally used for security reasons.
0195    void zero_free_memory();
0196 
0197    template<class T>
0198    T *allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0199                            size_type &prefer_in_recvd_out_size, T *&reuse);
0200 
0201    void * raw_allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0202                                size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object = 1);
0203 
0204    //!Returns the size of the previously allocated buffer pointed to by ptr
0205    size_type size(const void *ptr) const;
0206 
0207    //!Allocates aligned bytes, returns 0 if there is no more memory.
0208    //!Alignment must be a power of 2
0209    void* allocate_aligned     (size_type nbytes, size_type alignment);
0210 
0211    private:
0212 
0213    //!Obtains the pointer returned to the user from the block control
0214    static void *priv_get_user_buffer(const block_ctrl *block);
0215 
0216    //!Obtains the block control structure of the user buffer
0217    static block_ctrl *priv_get_block(const void *ptr);
0218 
0219    //!Real allocation algorithm with min allocation option
0220    void * priv_allocate(boost::interprocess::allocation_type command
0221                         ,size_type min_size
0222                         ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr);
0223 
0224    void * priv_allocation_command(boost::interprocess::allocation_type command
0225                                  ,size_type min_size
0226                                  ,size_type &prefer_in_recvd_out_size
0227                                  ,void *&reuse_ptr
0228                                  ,size_type sizeof_object);
0229 
0230    //!Returns the number of total units that a user buffer
0231    //!of "userbytes" bytes really occupies (including header)
0232    static size_type priv_get_total_units(size_type userbytes);
0233 
0234    static size_type priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes);
0235    size_type priv_block_end_offset() const;
0236 
0237    //!Returns next block if it's free.
0238    //!Returns 0 if next block is not free.
0239    block_ctrl *priv_next_block_if_free(block_ctrl *ptr);
0240 
0241    //!Returns true if this block is allocated (used blocks are marked with a null m_next)
0242    bool priv_is_allocated_block(block_ctrl *ptr);
0243 
0244    //!Returns the previous block (and its predecessor in the free list) if it's free.
0245    //!Returns a pair of null pointers if the previous block is not free.
0246    std::pair<block_ctrl*, block_ctrl*> priv_prev_block_if_free(block_ctrl *ptr);
0247 
0248    //!Real expand function implementation
0249    bool priv_expand(void *ptr, size_type min_size, size_type &prefer_in_recvd_out_size);
0250 
0251    //!Real expand to both sides implementation
0252    void* priv_expand_both_sides(boost::interprocess::allocation_type command
0253                                ,size_type min_size, size_type &prefer_in_recvd_out_size
0254                                ,void *reuse_ptr
0255                                ,bool only_preferred_backwards);
0256 
0257    //!Real private aligned allocation function
0258    //void* priv_allocate_aligned     (size_type nbytes, size_type alignment);
0259 
0260    //!Checks if block has enough memory and splits/unlinks the block
0261    //!returning the address to the user
0262    void* priv_check_and_allocate(size_type units
0263                                 ,block_ctrl* prev
0264                                 ,block_ctrl* block
0265                                 ,size_type &received_size);
0266    //!Real deallocation algorithm
0267    void priv_deallocate(void *addr);
0268 
0269    //!Makes a new memory portion available for allocation
0270    void priv_add_segment(void *addr, size_type size);
0271 
0272    void priv_mark_new_allocated_block(block_ctrl *block);
0273 
0274    public:
0275    static const size_type Alignment      = ::boost::container::dtl::alignment_of
0276       < ::boost::container::dtl::max_align_t>::value;
0277    private:
0278    static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
0279    static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
0280    static const size_type MinBlockUnits  = BlockCtrlUnits;
0281    static const size_type MinBlockSize   = MinBlockUnits*Alignment;
0282    static const size_type AllocatedCtrlBytes = BlockCtrlBytes;
0283    static const size_type AllocatedCtrlUnits = BlockCtrlUnits;
0284    static const size_type UsableByPreviousChunk = 0;
0285 
0286    public:
0287    static const size_type PayloadPerAllocation = BlockCtrlBytes;
0288 };
0289 
0290 template<class MutexFamily, class VoidPointer>
0291 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0292 simple_seq_fit_impl<MutexFamily, VoidPointer>
0293    ::priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes)
0294 {
0295    //First align "this" pointer
0296    size_type uint_this         = (std::size_t)this_ptr;
0297    size_type uint_aligned_this = uint_this/Alignment*Alignment;
0298    size_type this_disalignment = (uint_this - uint_aligned_this);
0299    size_type block1_off =
0300       ipcdetail::get_rounded_size(sizeof(simple_seq_fit_impl) + extra_hdr_bytes + this_disalignment, Alignment)
0301       - this_disalignment;
0302    algo_impl_t::assert_alignment(this_disalignment + block1_off);
0303    return block1_off;
0304 }
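/*
   Editor's note: a worked example of the offset arithmetic above with
   hypothetical numbers. Assume Alignment == 16, this_ptr == 0x1008,
   sizeof(simple_seq_fit_impl) == 72 and extra_hdr_bytes == 0:

      uint_aligned_this = 0x1000, this_disalignment = 8
      block1_off = round_up(72 + 0 + 8, 16) - 8 = 80 - 8 = 72

   The first block then starts at 0x1008 + 72 == 0x1050, which is a multiple
   of Alignment, as the assertion checks.
*/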
0305 
0306 template<class MutexFamily, class VoidPointer>
0307 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0308 simple_seq_fit_impl<MutexFamily, VoidPointer>
0309    ::priv_block_end_offset() const
0310 {
0311    //First align "this" pointer
0312    size_type uint_this         = (std::size_t)this;
0313    size_type uint_aligned_this = uint_this/Alignment*Alignment;
0314    size_type this_disalignment = (uint_this - uint_aligned_this);
0315    size_type old_end =
0316       ipcdetail::get_truncated_size(m_header.m_size + this_disalignment, Alignment)
0317       - this_disalignment;
0318    algo_impl_t::assert_alignment(old_end + this_disalignment);
0319    return old_end;
0320 }
0321 
0322 template<class MutexFamily, class VoidPointer>
0323 inline simple_seq_fit_impl<MutexFamily, VoidPointer>::
0324    simple_seq_fit_impl(size_type segment_size, size_type extra_hdr_bytes)
0325 {
0326    //Initialize sizes and counters
0327    m_header.m_allocated = 0;
0328    m_header.m_size      = segment_size;
0329    m_header.m_extra_hdr_bytes = extra_hdr_bytes;
0330 
0331    //Initialize pointers
0332    size_type block1_off = priv_first_block_offset(this, extra_hdr_bytes);
0333 
0334    m_header.m_root.m_next  = move_detail::force_ptr<block_ctrl*>
0335       ((reinterpret_cast<char*>(this) + block1_off));
0336    algo_impl_t::assert_alignment(ipcdetail::to_raw_pointer(m_header.m_root.m_next));
0337    m_header.m_root.m_next->m_size  = (segment_size - block1_off)/Alignment;
0338    m_header.m_root.m_next->m_next  = &m_header.m_root;
0339 }
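/*
   Editor's note: after construction the free list is a circular singly linked
   list holding a single free block that covers the whole segment past the
   header:

      m_header.m_root.m_next --> [free block at this + block1_off,
                                  m_size = (segment_size - block1_off)/Alignment]
      that block's m_next    --> &m_header.m_root   (back to the root)
*/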
0340 
0341 template<class MutexFamily, class VoidPointer>
0342 inline simple_seq_fit_impl<MutexFamily, VoidPointer>::~simple_seq_fit_impl()
0343 {
0344    //There is a memory leak!
0345 //   BOOST_ASSERT(m_header.m_allocated == 0);
0346 //   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
0347 }
0348 
0349 template<class MutexFamily, class VoidPointer>
0350 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::grow(size_type extra_size)
0351 {
0352    //Old highest address block's end offset
0353    size_type old_end = this->priv_block_end_offset();
0354 
0355    //Update managed buffer's size
0356    m_header.m_size += extra_size;
0357 
0358    //We need at least MinBlockSize bytes to create a new block
0359    if((m_header.m_size - old_end) < MinBlockSize){
0360       return;
0361    }
0362 
0363    //We'll create a new free block with extra_size bytes
0364 
0365    block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
0366       (reinterpret_cast<char*>(this) + old_end);
0367 
0368    algo_impl_t::assert_alignment(new_block);
0369    new_block->m_next = 0;
0370    new_block->m_size = (m_header.m_size - old_end)/Alignment;
0371    m_header.m_allocated += new_block->m_size*Alignment;
0372    this->priv_deallocate(priv_get_user_buffer(new_block));
0373 }
0374 
0375 template<class MutexFamily, class VoidPointer>
0376 void simple_seq_fit_impl<MutexFamily, VoidPointer>::shrink_to_fit()
0377 {
0378    //Get the root and the first memory block
0379    block_ctrl *prev                 = &m_header.m_root;
0380    block_ctrl *last                 = &m_header.m_root;
0381    block_ctrl *block                = ipcdetail::to_raw_pointer(last->m_next);
0382    block_ctrl *root                 = &m_header.m_root;
0383 
0384    //No free block?
0385    if(block == root) return;
0386 
0387    //Iterate through the free block list
0388    while(block != root){
0389       prev  = last;
0390       last  = block;
0391       block = ipcdetail::to_raw_pointer(block->m_next);
0392    }
0393 
0394    char *last_free_end_address   = reinterpret_cast<char*>(last) + last->m_size*Alignment;
0395    if(last_free_end_address != (reinterpret_cast<char*>(this) + priv_block_end_offset())){
0396       //there is an allocated block at the end of the segment
0397       //so no shrinking is possible
0398       return;
0399    }
0400 
0401    //Check if we have only 1 big free block
0402    void *unique_block = 0;
0403    if(!m_header.m_allocated){
0404       BOOST_ASSERT(prev == root);
0405       size_type ignore_recvd = 0;
0406       void *ignore_reuse = 0;
0407       unique_block = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
0408       if(!unique_block)
0409          return;
0410       last = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
0411       BOOST_ASSERT(last_free_end_address == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
0412    }
0413    size_type last_units = last->m_size;
0414 
0415    size_type received_size;
0416    void *addr = priv_check_and_allocate(last_units, prev, last, received_size);
0417    (void)addr;
0418    BOOST_ASSERT(addr);
0419    BOOST_ASSERT(received_size == last_units*Alignment - AllocatedCtrlBytes);
0420 
0421    //Shrink it
0422    m_header.m_size /= Alignment;
0423    m_header.m_size -= last->m_size;
0424    m_header.m_size *= Alignment;
0425    m_header.m_allocated -= last->m_size*Alignment;
0426 
0427    if(unique_block)
0428       priv_deallocate(unique_block);
0429 }
0430 
0431 template<class MutexFamily, class VoidPointer>
0432 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
0433    priv_mark_new_allocated_block(block_ctrl *new_block)
0434 {
0435    new_block->m_next = 0;
0436 }
0437 
0438 template<class MutexFamily, class VoidPointer>
0439 inline
0440 typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
0441    simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
0442 {
0443    return const_cast<block_ctrl*>(move_detail::force_ptr<const block_ctrl*>
0444       (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
0445 }
0446 
0447 template<class MutexFamily, class VoidPointer>
0448 inline
0449 void *simple_seq_fit_impl<MutexFamily, VoidPointer>::
0450       priv_get_user_buffer(const typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
0451 {
0452    return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
0453 }
0454 
0455 template<class MutexFamily, class VoidPointer>
0456 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_add_segment(void *addr, size_type segment_size)
0457 {
0458    algo_impl_t::assert_alignment(addr);
0459    //Check size
0460    BOOST_ASSERT(!(segment_size < MinBlockSize));
0461    if(segment_size < MinBlockSize)
0462       return;
0463    //Construct big block using the new segment
0464    block_ctrl *new_block   = static_cast<block_ctrl *>(addr);
0465    new_block->m_size       = segment_size/Alignment;
0466    new_block->m_next       = 0;
0467    //Simulate that this block was previously allocated
0468    m_header.m_allocated   += new_block->m_size*Alignment;
0469    //Return block and insert it in the free block list
0470    this->priv_deallocate(priv_get_user_buffer(new_block));
0471 }
0472 
0473 template<class MutexFamily, class VoidPointer>
0474 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0475 simple_seq_fit_impl<MutexFamily, VoidPointer>::get_size()  const
0476    {  return m_header.m_size;  }
0477 
0478 template<class MutexFamily, class VoidPointer>
0479 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0480 simple_seq_fit_impl<MutexFamily, VoidPointer>::get_free_memory()  const
0481 {
0482    return m_header.m_size - m_header.m_allocated -
0483       algo_impl_t::multiple_of_units(sizeof(*this) + m_header.m_extra_hdr_bytes);
0484 }
0485 
0486 template<class MutexFamily, class VoidPointer>
0487 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0488 simple_seq_fit_impl<MutexFamily, VoidPointer>::
0489    get_min_size (size_type extra_hdr_bytes)
0490 {
0491    return ipcdetail::get_rounded_size((size_type)sizeof(simple_seq_fit_impl),Alignment) +
0492           ipcdetail::get_rounded_size(extra_hdr_bytes,Alignment)
0493           + MinBlockSize;
0494 }
0495 
0496 template<class MutexFamily, class VoidPointer>
0497 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
0498     all_memory_deallocated()
0499 {
0500    //-----------------------
0501    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0502    //-----------------------
0503    return m_header.m_allocated == 0 &&
0504           ipcdetail::to_raw_pointer(m_header.m_root.m_next->m_next) == &m_header.m_root;
0505 }
0506 
0507 template<class MutexFamily, class VoidPointer>
0508 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::zero_free_memory()
0509 {
0510    //-----------------------
0511    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0512    //-----------------------
0513    block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
0514 
0515    //Iterate through all free portions
0516    do{
0517       //Just clear the memory part reserved for the user
0518       std::memset( priv_get_user_buffer(block)
0519                  , 0
0520              , block->get_user_bytes());
0521       block = ipcdetail::to_raw_pointer(block->m_next);
0522    }
0523    while(block != &m_header.m_root);
0524 }
0525 
0526 template<class MutexFamily, class VoidPointer>
0527 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
0528     check_sanity()
0529 {
0530    //-----------------------
0531    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0532    //-----------------------
0533    block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
0534 
0535    size_type free_memory = 0;
0536 
0537    //Iterate through all blocks obtaining their size
0538    while(block != &m_header.m_root){
0539       algo_impl_t::assert_alignment(block);
0540       if(!algo_impl_t::check_alignment(block))
0541          return false;
0542       //A free block's next must always be valid
0543       block_ctrl *next = ipcdetail::to_raw_pointer(block->m_next);
0544       if(!next){
0545          return false;
0546       }
0547       free_memory += block->m_size*Alignment;
0548       block = next;
0549    }
0550 
0551    //Check allocated bytes are less than size
0552    if(m_header.m_allocated > m_header.m_size){
0553       return false;
0554    }
0555 
0556    //Check free bytes are less than size
0557    if(free_memory > m_header.m_size){
0558       return false;
0559    }
0560    return true;
0561 }
0562 
0563 template<class MutexFamily, class VoidPointer>
0564 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0565    allocate(size_type nbytes)
0566 {
0567    //-----------------------
0568    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0569    //-----------------------
0570    size_type ignore_recvd = nbytes;
0571    void *ignore_reuse = 0;
0572    return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
0573 }
0574 
0575 template<class MutexFamily, class VoidPointer>
0576 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0577    allocate_aligned(size_type nbytes, size_type alignment)
0578 {
0579    //-----------------------
0580    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0581    //-----------------------
0582    return algo_impl_t::
0583       allocate_aligned(this, nbytes, alignment);
0584 }
0585 
0586 template<class MutexFamily, class VoidPointer>
0587 template<class T>
0588 inline T* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0589    allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0590                         size_type &prefer_in_recvd_out_size, T *&reuse_ptr)
0591 {
0592    void *raw_reuse = reuse_ptr;
0593    void * const ret = priv_allocation_command
0594       (command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
0595    BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
0596    reuse_ptr = static_cast<T*>(raw_reuse);
0597    return static_cast<T*>(ret);
0598 }
0599 
0600 template<class MutexFamily, class VoidPointer>
0601 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0602    raw_allocation_command  (boost::interprocess::allocation_type command, size_type limit_objects,
0603                         size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
0604 {
0605    size_type const preferred_objects = prefer_in_recvd_out_size;
0606    if(!sizeof_object){
0607       return reuse_ptr = 0, static_cast<void*>(0);
0608    }
0609    if(command & boost::interprocess::try_shrink_in_place){
0610       if(!reuse_ptr) return static_cast<void*>(0);
0611       prefer_in_recvd_out_size = preferred_objects*sizeof_object;
0612       bool success = algo_impl_t::try_shrink
0613          ( this, reuse_ptr, limit_objects*sizeof_object, prefer_in_recvd_out_size);
0614       prefer_in_recvd_out_size /= sizeof_object;
0615       return success ? reuse_ptr : 0;
0616    }
0617    else{
0618       return priv_allocation_command
0619          (command, limit_objects, prefer_in_recvd_out_size, reuse_ptr, sizeof_object);
0620    }
0621 }
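/*
   Editor's note: an illustrative sketch (editor's addition) of how the
   allocation_command in/out parameters are used; "algo" and "old_ptr" are
   hypothetical names, and "algo" is assumed to be a simple_seq_fit_impl
   placement-constructed at the start of a raw buffer.

      typedef boost::interprocess::ipcdetail::simple_seq_fit_impl
         < boost::interprocess::mutex_family
         , boost::interprocess::offset_ptr<void> > algo_t;

      algo_t::size_type prefer = 100;  //in: preferred object count
      int *reuse = old_ptr;            //in: buffer we would like to grow
      int *p = algo.allocation_command<int>
         ( boost::interprocess::expand_fwd | boost::interprocess::allocate_new
         , 50        //limit: accept no fewer than 50 objects
         , prefer    //out: number of objects actually obtained
         , reuse);   //out: left non-null only if expanded in place
      //If a brand new block was allocated, reuse is set to 0 and p points to
      //it; if the old buffer could grow forward, p is the old buffer and
      //prefer reports the new capacity in objects.
*/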
0622 
0623 template<class MutexFamily, class VoidPointer>
0624 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0625    priv_allocation_command (boost::interprocess::allocation_type command,   size_type limit_size,
0626                        size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
0627 {
0628    size_type const preferred_size = prefer_in_recvd_out_size;
0629    command &= ~boost::interprocess::expand_bwd;
0630    if(!command){
0631       return reuse_ptr = 0, static_cast<void*>(0);
0632    }
0633 
0634    size_type max_count = m_header.m_size/sizeof_object;
0635    if(limit_size > max_count || preferred_size > max_count){
0636       return reuse_ptr = 0, static_cast<void*>(0);
0637    }
0638    size_type l_size = limit_size*sizeof_object;
0639    size_type r_size = preferred_size*sizeof_object;
0640    void *ret = 0;
0641    {
0642       //-----------------------
0643       boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0644       //-----------------------
0645       ret = priv_allocate(command, l_size, r_size, reuse_ptr);
0646    }
0647    prefer_in_recvd_out_size = r_size/sizeof_object;
0648    return ret;
0649 }
0650 
0651 template<class MutexFamily, class VoidPointer>
0652 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0653 simple_seq_fit_impl<MutexFamily, VoidPointer>::size(const void *ptr) const
0654 {
0655    //We need no synchronization since this block is not going
0656    //to be modified
0657    //Obtain the real size of the block
0658    const block_ctrl *block = static_cast<const block_ctrl*>(priv_get_block(ptr));
0659    return block->get_user_bytes();
0660 }
0661 
0662 template<class MutexFamily, class VoidPointer>
0663 void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0664    priv_expand_both_sides(boost::interprocess::allocation_type command
0665                          ,size_type min_size
0666                          ,size_type &prefer_in_recvd_out_size
0667                          ,void *reuse_ptr
0668                          ,bool only_preferred_backwards)
0669 {
0670    size_type const preferred_size = prefer_in_recvd_out_size;
0671    typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
0672    block_ctrl *reuse = priv_get_block(reuse_ptr);
0673    prefer_in_recvd_out_size = 0;
0674 
0675    if(this->size(reuse_ptr) > min_size){
0676       prefer_in_recvd_out_size = this->size(reuse_ptr);
0677       return reuse_ptr;
0678    }
0679 
0680    if(command & boost::interprocess::expand_fwd){
0681       if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
0682          return reuse_ptr;
0683    }
0684    else{
0685       prefer_in_recvd_out_size = this->size(reuse_ptr);
0686    }
0687    if(command & boost::interprocess::expand_bwd){
0688       size_type extra_forward = !prefer_in_recvd_out_size ? 0 : prefer_in_recvd_out_size + BlockCtrlBytes;
0689       prev_block_t prev_pair = priv_prev_block_if_free(reuse);
0690       block_ctrl *prev = prev_pair.second;
0691       if(!prev){
0692          return 0;
0693       }
0694 
0695       size_type needs_backwards =
0696          ipcdetail::get_rounded_size(preferred_size - extra_forward, Alignment);
0697 
0698       if(!only_preferred_backwards){
0699             max_value(ipcdetail::get_rounded_size(min_size - extra_forward, Alignment)
0700                      ,min_value(prev->get_user_bytes(), needs_backwards));
0701       }
0702 
0703       //Check if previous block has enough size
0704       if((prev->get_user_bytes()) >=  needs_backwards){
0705          //Now take all next space. This will succeed
0706          if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, prefer_in_recvd_out_size)){
0707             BOOST_ASSERT(0);
0708          }
0709 
0710          //We need a minimum size to split the previous one
0711          if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
0712              block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
0713                   (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);
0714 
0715             new_block->m_next = 0;
0716             new_block->m_size =
0717                BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
0718             prev->m_size =
0719                (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
0720             prefer_in_recvd_out_size = needs_backwards + extra_forward;
0721             m_header.m_allocated += needs_backwards + BlockCtrlBytes;
0722             return priv_get_user_buffer(new_block);
0723          }
0724          else{
0725             //Just merge the whole previous block
0726             block_ctrl *prev_2_block = prev_pair.first;
0727             //Update received size and allocation
0728             prefer_in_recvd_out_size = extra_forward + prev->get_user_bytes();
0729             m_header.m_allocated += prev->get_total_bytes();
0730             //Now unlink it from previous block
0731             prev_2_block->m_next = prev->m_next;
0732             prev->m_size = reuse->m_size + prev->m_size;
0733             prev->m_next = 0;
0734             priv_get_user_buffer(prev);
0735          }
0736       }
0737    }
0738    return 0;
0739 }
0740 
0741 template<class MutexFamily, class VoidPointer>
0742 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
0743    deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain &chain)
0744 {
0745    //-----------------------
0746    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0747    //-----------------------
0748    while(!chain.empty()){
0749       this->priv_deallocate(to_raw_pointer(chain.pop_front()));
0750    }
0751 }
0752 
0753 template<class MutexFamily, class VoidPointer>
0754 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0755 simple_seq_fit_impl<MutexFamily, VoidPointer>::
0756    priv_get_total_units(size_type userbytes)
0757 {
0758    size_type s = ipcdetail::get_rounded_size(userbytes, Alignment)/Alignment;
0759    if(!s)   ++s;
0760    return BlockCtrlUnits + s;
0761 }
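/*
   Editor's note: worked numbers for the conversion above, assuming
   (hypothetically) Alignment == 16 and BlockCtrlUnits == 1: for
   userbytes == 30, get_rounded_size(30, 16)/16 == 2, so the block needs
   1 + 2 == 3 total units, i.e. 48 bytes (16 for the header, 32 usable).
*/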
0762 
0763 template<class MutexFamily, class VoidPointer>
0764 void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
0765    priv_allocate(boost::interprocess::allocation_type command
0766                 ,size_type limit_size, size_type &prefer_in_recvd_out_size, void *&reuse_ptr)
0767 {
0768    size_type const preferred_size = prefer_in_recvd_out_size;
0769    if(command & boost::interprocess::shrink_in_place){
0770       if(!reuse_ptr)  return static_cast<void*>(0);
0771       bool success = algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size);
0772       return success ? reuse_ptr : 0;
0773    }
0774    prefer_in_recvd_out_size = 0;
0775 
0776    if(limit_size > preferred_size){
0777       return reuse_ptr = 0, static_cast<void*>(0);
0778    }
0779 
0780    //Number of units to request (including block_ctrl header)
0781    size_type nunits = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlUnits;
0782 
0783    //Get the root and the first memory block
0784    block_ctrl *prev                 = &m_header.m_root;
0785    block_ctrl *block                = ipcdetail::to_raw_pointer(prev->m_next);
0786    block_ctrl *root                 = &m_header.m_root;
0787    block_ctrl *biggest_block        = 0;
0788    block_ctrl *prev_biggest_block   = 0;
0789    size_type biggest_size         = 0;
0790 
0791    //Expand in place
0792    if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
0793       void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, true);
0794       if(ret){
0795          algo_impl_t::assert_alignment(ret);
0796          return ret;
0797       }
0798    }
0799 
0800    if(command & boost::interprocess::allocate_new){
0801       prefer_in_recvd_out_size = 0;
0802       while(block != root){
0803          //Update biggest block pointers
0804          if(block->m_size > biggest_size){
0805             prev_biggest_block = prev;
0806             biggest_size  = block->m_size;
0807             biggest_block = block;
0808          }
0809          algo_impl_t::assert_alignment(block);
0810          void *addr = this->priv_check_and_allocate(nunits, prev, block, prefer_in_recvd_out_size);
0811          if(addr){
0812             algo_impl_t::assert_alignment(addr);
0813             return reuse_ptr = 0, addr;
0814          }
0815          //Bad luck, let's check next block
0816          prev  = block;
0817          block = ipcdetail::to_raw_pointer(block->m_next);
0818       }
0819 
0820       //Bad luck finding preferred_size; now, if we found any biggest_block,
0821       //try with that block
0822       if(biggest_block){
0823          size_type limit_units = ipcdetail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlUnits;
0824          if(biggest_block->m_size < limit_units){
0825             return reuse_ptr = 0, static_cast<void*>(0);
0826          }
0827          void *ret = this->priv_check_and_allocate
0828             (biggest_block->m_size, prev_biggest_block, biggest_block, prefer_in_recvd_out_size = biggest_block->m_size*Alignment - BlockCtrlUnits);
0829          BOOST_ASSERT(ret != 0);
0830          algo_impl_t::assert_alignment(ret);
0831          return reuse_ptr = 0, ret;
0832       }
0833    }
0834    //Now try to expand both sides with min size
0835    if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
0836       void *ret = priv_expand_both_sides (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false);
0837       algo_impl_t::assert_alignment(ret);
0838       return ret;
0839    }
0840    return reuse_ptr = 0, static_cast<void*>(0);
0841 }
0842 
0843 template<class MutexFamily, class VoidPointer> inline
0844 bool simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_is_allocated_block
0845       (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
0846 {  return block->m_next == 0;  }
0847 
0848 template<class MutexFamily, class VoidPointer>
0849 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
0850    simple_seq_fit_impl<MutexFamily, VoidPointer>::
0851       priv_next_block_if_free
0852          (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
0853 {
0854    //Take the address where the next block should go
0855    block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
0856       (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
0857 
0858    //Check if the adjacent block is in the managed segment
0859    char *this_char_ptr = reinterpret_cast<char*>(this);
0860    char *next_char_ptr = reinterpret_cast<char*>(next_block);
0861    size_type distance = (size_type)(next_char_ptr - this_char_ptr)/Alignment;
0862 
0863    if(distance >= (m_header.m_size/Alignment)){
0864       //"next_block" does not exist so we can't expand "block"
0865       return 0;
0866    }
0867 
0868    if(!next_block->m_next)
0869       return 0;
0870 
0871    return next_block;
0872 }
0873 
0874 template<class MutexFamily, class VoidPointer>
0875 inline
0876    std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
0877             ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
0878    simple_seq_fit_impl<MutexFamily, VoidPointer>::
0879       priv_prev_block_if_free
0880          (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
0881 {
0882    typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;
0883    //Take the address where the previous block should go
0884    block_ctrl *root           = &m_header.m_root;
0885    block_ctrl *prev_2_block   = root;
0886    block_ctrl *prev_block = ipcdetail::to_raw_pointer(root->m_next);
0887 
0888    while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)
0889             != reinterpret_cast<char*>(ptr)
0890          && prev_block != root){
0891       prev_2_block = prev_block;
0892       prev_block = ipcdetail::to_raw_pointer(prev_block->m_next);
0893    }
0894 
0895    if(prev_block == root || !prev_block->m_next)
0896       return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
0897 
0898    //Check if the previous block is in the managed segment
0899    char *this_char_ptr = reinterpret_cast<char*>(this);
0900    char *prev_char_ptr = reinterpret_cast<char*>(prev_block);
0901    size_type distance = (size_type)(prev_char_ptr - this_char_ptr)/Alignment;
0902 
0903    if(distance >= (m_header.m_size/Alignment)){
0904       //"previous_block" does not exist so we can't expand "block"
0905       return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
0906    }
0907    return prev_pair_t(prev_2_block, prev_block);
0908 }
0909 
0910 
0911 template<class MutexFamily, class VoidPointer>
0912 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
0913    priv_expand (void *ptr, size_type min_size, size_type &received_size)
0914 {
0915    size_type preferred_size = received_size;
0916    //Obtain the real size of the block
0917    block_ctrl *block = move_detail::force_ptr<block_ctrl*>(priv_get_block(ptr));
0918    size_type old_block_size = block->m_size;
0919 
0920    //All used blocks' next is marked with 0 so check it
0921    BOOST_ASSERT(block->m_next == 0);
0922 
0923    //Set this to a safe value
0924    received_size = old_block_size*Alignment - BlockCtrlBytes;
0925 
0926    //Now translate it to Alignment units
0927    min_size       = ipcdetail::get_rounded_size(min_size, Alignment)/Alignment;
0928    preferred_size = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment;
0929 
0930    //Some parameter checks
0931    if(min_size > preferred_size)
0932       return false;
0933 
0934    size_type data_size = old_block_size - BlockCtrlUnits;
0935 
0936    if(data_size >= min_size)
0937       return true;
0938 
0939    block_ctrl *next_block = priv_next_block_if_free(block);
0940    if(!next_block){
0941       return false;
0942    }
0943 
0944    //Is "block" + "next_block" big enough?
0945    size_type merged_size = old_block_size + next_block->m_size;
0946 
0947    //Now we can expand this block further than before
0948    received_size = merged_size*Alignment - BlockCtrlBytes;
0949 
0950    if(merged_size < (min_size + BlockCtrlUnits)){
0951       return false;
0952    }
0953 
0954    //We can fully expand. Merge both blocks
0955    block->m_next = next_block->m_next;
0956    block->m_size = merged_size;
0957 
0958    //Find the previous free block of next_block
0959    block_ctrl *prev = &m_header.m_root;
0960    while(ipcdetail::to_raw_pointer(prev->m_next) != next_block){
0961       prev = ipcdetail::to_raw_pointer(prev->m_next);
0962    }
0963 
0964    //Now insert merged block in the free list
0965    //This allows reusing allocation logic in this function
0966    m_header.m_allocated -= old_block_size*Alignment;
0967    prev->m_next = block;
0968 
0969    //Now use check and allocate to do the allocation logic
0970    preferred_size += BlockCtrlUnits;
0971    size_type nunits = preferred_size < merged_size ? preferred_size : merged_size;
0972 
0973    //This must succeed since nunits is not greater than merged_size!
0974    if(!this->priv_check_and_allocate (nunits, prev, block, received_size)){
0975       //Something very ugly is happening here. This is a bug
0976       //or there is memory corruption
0977       BOOST_ASSERT(0);
0978       return false;
0979    }
0980    return true;
0981 }
0982 
0983 template<class MutexFamily, class VoidPointer> inline
0984 void* simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_check_and_allocate
0985    (size_type nunits
0986    ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* prev
0987    ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* block
0988    ,size_type &received_size)
0989 {
0990    size_type upper_nunits = nunits + BlockCtrlUnits;
0991    bool found = false;
0992 
0993    if (block->m_size > upper_nunits){
0994       //This block is bigger than needed, split it into
0995       //two blocks: the first's size will be "nunits",
0996       //the second's size will be "block->m_size - nunits"
0997       size_type total_size = block->m_size;
0998       block->m_size  = nunits;
0999 
1000       block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
1001          (reinterpret_cast<char*>(block) + Alignment*nunits);
1002       new_block->m_size  = total_size - nunits;
1003       new_block->m_next  = block->m_next;
1004       prev->m_next = new_block;
1005       found = true;
1006    }
1007    else if (block->m_size >= nunits){
1008       //This block has the right size, with at most a few
1009       //extra bytes that are too small to split off (they stay unusable).
1010       prev->m_next = block->m_next;
1011       found = true;
1012    }
1013 
1014    if(found){
1015       //We keep the block_ctrl header for deallocation bookkeeping and
1016       //return the memory the user can overwrite
1017       m_header.m_allocated += block->m_size*Alignment;
1018       received_size =  block->get_user_bytes();
1019       //Mark the block as allocated
1020       block->m_next = 0;
1021       //Check alignment
1022       algo_impl_t::assert_alignment(block);
1023       return priv_get_user_buffer(block);
1024    }
1025    return 0;
1026 }
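/*
   Editor's note: a worked example of the split logic above, assuming
   (hypothetically) BlockCtrlUnits == 1. For nunits == 3, upper_nunits == 4:
     - a free block with m_size == 10 is bigger than upper_nunits, so it is
       split: the first 3 units are returned to the caller and a new free
       block of 7 units replaces it in the list;
     - a free block with m_size == 4 is not bigger than upper_nunits but is
       >= nunits, so the whole block is unlinked and returned, leaving one
       unit of unusable slack inside the allocation.
*/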
1027 
1028 template<class MutexFamily, class VoidPointer>
1029 void simple_seq_fit_impl<MutexFamily, VoidPointer>::deallocate(void* addr)
1030 {
1031    if(!addr)   return;
1032    //-----------------------
1033    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
1034    //-----------------------
1035    return this->priv_deallocate(addr);
1036 }
1037 
1038 template<class MutexFamily, class VoidPointer>
1039 void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
1040 {
1041    if(!addr)   return;
1042 
1043    //Let's get the free block list. The list is always sorted
1044    //by memory address to allow block merging.
1045    //The root's next pointer always points to the first
1046    //(lowest address) free block
1047    block_ctrl * prev  = &m_header.m_root;
1048    block_ctrl * pos   = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
1049    block_ctrl * block = move_detail::force_ptr<block_ctrl*>(priv_get_block(addr));
1050 
1051    //All used blocks' next is marked with 0 so check it
1052    BOOST_ASSERT(block->m_next == 0);
1053 
1054    //Check if alignment and block size are right
1055    algo_impl_t::assert_alignment(addr);
1056 
1057    size_type total_size = Alignment*block->m_size;
1058    BOOST_ASSERT(m_header.m_allocated >= total_size);
1059 
1060    //Update used memory count
1061    m_header.m_allocated -= total_size;
1062 
1063    //Let's find the previous and the next block of the block to deallocate
1064    //This ordering comparison must be done with the original pointer
1065    //types since their mapping to raw pointers can be different
1066    //in each process
1067    while((ipcdetail::to_raw_pointer(pos) != &m_header.m_root) && (block > pos)){
1068       prev = pos;
1069       pos = ipcdetail::to_raw_pointer(pos->m_next);
1070    }
1071 
1072    //Try to combine with upper block
1073    char *block_char_ptr = reinterpret_cast<char*>(ipcdetail::to_raw_pointer(block));
1074 
1075    if ((block_char_ptr + Alignment*block->m_size) ==
1076          reinterpret_cast<char*>(ipcdetail::to_raw_pointer(pos))){
1077       block->m_size += pos->m_size;
1078       block->m_next  = pos->m_next;
1079    }
1080    else{
1081       block->m_next = pos;
1082    }
1083 
1084    //Try to combine with lower block
1085    if ((reinterpret_cast<char*>(ipcdetail::to_raw_pointer(prev))
1086             + Alignment*prev->m_size) ==
1087         block_char_ptr){
1088 
1089 
1090       prev->m_size += block->m_size;
1091       prev->m_next  = block->m_next;
1092    }
1093    else{
1094       prev->m_next = block;
1095    }
1096 }
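/*
   Editor's note: a small coalescing example for the logic above. Suppose the
   free list holds blocks at byte offsets [96, 160) and [208, 256) and the
   block being freed spans [160, 208). The forward check merges it with
   [208, 256) because their addresses are contiguous, and the backward check
   then merges the result into [96, 160), leaving a single free block covering
   [96, 256).
*/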
1097 
1098 }  //namespace ipcdetail {
1099 
1100 }  //namespace interprocess {
1101 
1102 }  //namespace boost {
1103 
1104 #include <boost/interprocess/detail/config_end.hpp>
1105 
1106 #endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
1107