0001 //////////////////////////////////////////////////////////////////////////////
0002 //
0003 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
0004 // Software License, Version 1.0. (See accompanying file
0005 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
0006 //
0007 // See http://www.boost.org/libs/interprocess for documentation.
0008 //
0009 //////////////////////////////////////////////////////////////////////////////
0010 
0011 #ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
0012 #define BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
0013 
0014 #ifndef BOOST_CONFIG_HPP
0015 #  include <boost/config.hpp>
0016 #endif
0017 #
0018 #if defined(BOOST_HAS_PRAGMA_ONCE)
0019 #  pragma once
0020 #endif
0021 
0022 #include <boost/interprocess/detail/config_begin.hpp>
0023 #include <boost/interprocess/detail/workaround.hpp>
0024 
0025 #include <boost/intrusive/pointer_traits.hpp>
0026 
0027 #include <boost/interprocess/interprocess_fwd.hpp>
0028 #include <boost/interprocess/containers/allocation_type.hpp>
0029 #include <boost/container/detail/multiallocation_chain.hpp>
0030 #include <boost/interprocess/offset_ptr.hpp>
0031 #include <boost/interprocess/sync/interprocess_mutex.hpp>
0032 #include <boost/interprocess/exceptions.hpp>
0033 #include <boost/interprocess/detail/utilities.hpp>
0034 #include <boost/interprocess/detail/min_max.hpp>
0035 #include <boost/interprocess/detail/type_traits.hpp>
0036 #include <boost/interprocess/sync/scoped_lock.hpp>
0037 #include <boost/intrusive/pointer_traits.hpp>
0038 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
0039 #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
0040 #include <boost/move/detail/force_ptr.hpp>
0041 #include <boost/intrusive/detail/minimal_pair_header.hpp>
0042 #include <cstring>
0043 #include <boost/assert.hpp>
0044 
0045 //!\file
0046 //!Describes sequential fit algorithm used to allocate objects in shared memory.
0047 //!This class is intended as a base class for single segment and multi-segment
0048 //!implementations.
0049 
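//Illustrative usage sketch (not part of the algorithm itself): the front-end
//class template boost::interprocess::simple_seq_fit<MutexFamily> wraps this
//implementation so that it can be plugged into a managed segment as its
//allocation algorithm. A minimal sketch, assuming the public headers named
//below and an application-chosen segment name:
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//   #include <boost/interprocess/mem_algo/simple_seq_fit.hpp>
//
//   namespace bip = boost::interprocess;
//
//   //Managed shared memory whose segment manager allocates with sequential fit
//   typedef bip::basic_managed_shared_memory
//      < char
//      , bip::simple_seq_fit<bip::mutex_family>
//      , bip::iset_index
//      > seq_fit_managed_shared_memory;
//
//   int main()
//   {
//      seq_fit_managed_shared_memory segment(bip::create_only, "MySegment", 65536);
//      void *p = segment.allocate(100);   //served by simple_seq_fit_impl::allocate
//      segment.deallocate(p);             //served by simple_seq_fit_impl::deallocate
//      bip::shared_memory_object::remove("MySegment");
//      return 0;
//   }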
0050 namespace boost {
0051 namespace interprocess {
0052 namespace ipcdetail {
0053 
0054 //!This class implements the simple sequential fit algorithm with a singly
0055 //!linked list of free buffers.
0056 //!This class is intended as a base class for single segment and multi-segment
0057 //!implementations.
0058 template<class MutexFamily, class VoidPointer>
0059 class simple_seq_fit_impl
0060 {
0061    //Non-copyable
0062    simple_seq_fit_impl();
0063    simple_seq_fit_impl(const simple_seq_fit_impl &);
0064    simple_seq_fit_impl &operator=(const simple_seq_fit_impl &);
0065 
0066    typedef typename boost::intrusive::
0067       pointer_traits<VoidPointer>::template
0068          rebind_pointer<char>::type                         char_ptr;
0069 
0070    public:
0071 
0072    //!Shared interprocess_mutex family used for the rest of the Interprocess framework
0073    typedef MutexFamily        mutex_family;
0074    //!Pointer type to be used with the rest of the Interprocess framework
0075    typedef VoidPointer        void_pointer;
0076    typedef boost::container::dtl::
0077       basic_multiallocation_chain<VoidPointer>     multiallocation_chain;
0078 
0079    typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
0080    typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;
0081 
0082 
0083    private:
0084    class block_ctrl;
0085    friend class block_ctrl;
0086 
0087    typedef typename boost::intrusive::
0088       pointer_traits<VoidPointer>::template
0089          rebind_pointer<block_ctrl>::type                   block_ctrl_ptr;
0090 
0091    //!Block control structure
0092    class block_ctrl
0093    {
0094       public:
0095       static const size_type size_mask = size_type(-1);
0096       //!Offset pointer to the next block.
0097       block_ctrl_ptr m_next;
0098       //!This block's memory size (including the block_ctrl
0099       //!header) in Alignment units
0100       size_type    m_size;
0101 
0102       size_type get_user_bytes() const
0103       {  return this->m_size*Alignment - BlockCtrlBytes; }
0104 
0105       size_type get_total_bytes() const
0106       {  return this->m_size*Alignment; }
0107    };
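   //A worked example of the size bookkeeping above (the concrete numbers are
   //platform-dependent assumptions, not guarantees): if Alignment == 16 and
   //sizeof(block_ctrl) rounds up to BlockCtrlBytes == 16, a block with
   //m_size == 4 spans
   //   get_total_bytes() == 4*16    == 64 bytes
   //   get_user_bytes()  == 64 - 16 == 48 bytes
   //so one Alignment-sized chunk of every block is consumed by its header.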
0108 
0109    //!Shared interprocess_mutex to protect memory allocate/deallocate
0110    typedef typename MutexFamily::mutex_type        interprocess_mutex;
0111 
0112    //!This struct includes needed data and derives from
0113    //!interprocess_mutex to allow EBO when using null interprocess_mutex
0114    struct header_t : public interprocess_mutex
0115    {
0116       //!Root block whose m_next points to the first free block
0117       block_ctrl        m_root;
0118       //!Allocated bytes for internal checking
0119       size_type         m_allocated;
0120       //!The size of the memory segment
0121       size_type         m_size;
0122       //!The extra size required by the segment
0123       size_type         m_extra_hdr_bytes;
0124    }  m_header;
0125 
0126    friend class ipcdetail::memory_algorithm_common<simple_seq_fit_impl>;
0127 
0128    typedef ipcdetail::memory_algorithm_common<simple_seq_fit_impl> algo_impl_t;
0129 
0130    public:
0131    //!Constructor. "size" is the total size of the managed memory segment,
0132    //!"extra_hdr_bytes" indicates the number of extra bytes, starting at offset
0133    //!sizeof(simple_seq_fit_impl), that the allocator must not use at all.
0134    simple_seq_fit_impl           (size_type size, size_type extra_hdr_bytes);
0135 
0136    //!Destructor
0137    ~simple_seq_fit_impl();
0138 
0139    //!Obtains the minimum size needed by the algorithm
0140    static size_type get_min_size (size_type extra_hdr_bytes);
0141 
0142    //Functions for single segment management
0143 
0144    //!Allocates bytes, returns 0 if there is no more memory
0145    void* allocate             (size_type nbytes);
0146 
0147    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0148 
0149    template<class T>
0150    T *allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0151                            size_type &prefer_in_recvd_out_size, T *&reuse);
0152 
0153    void * raw_allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0154                                size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object = 1);
0155 
0156    //!Multiple element allocation, same size
0157    //!Experimental. Don't use
0158    void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
0159    {
0160       //-----------------------
0161       boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0162       //-----------------------
0163       algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
0164    }
0165 
0166    //!Multiple element allocation, different size
0167    //!Experimental. Don't use
0168    void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
0169    {
0170       //-----------------------
0171       boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0172       //-----------------------
0173       algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
0174    }
0175 
0176    //!Multiple element deallocation
0177    //!Experimental. Don't use
0178    void deallocate_many(multiallocation_chain &chain);
0179 
0180    #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
0181 
0182    //!Deallocates previously allocated bytes
0183    void   deallocate          (void *addr);
0184 
0185    //!Returns the size of the memory segment
0186    size_type get_size()  const;
0187 
0188    //!Returns the number of free bytes of the memory segment
0189    size_type get_free_memory()  const;
0190 
0191    //!Increases the managed memory by extra_size bytes
0192    void grow(size_type extra_size);
0193 
0194    //!Decreases managed memory as much as possible
0195    void shrink_to_fit();
0196 
0197    //!Returns true if all allocated memory has been deallocated
0198    bool all_memory_deallocated();
0199 
0200    //!Makes an internal sanity check and returns true on success
0201    bool check_sanity();
0202 
0203    //!Initializes to zero all the memory that's not in use.
0204    //!This function is normally used for security reasons.
0205    void zero_free_memory();
0206 
0207    //!Returns the size of the previously allocated buffer pointed to by ptr
0208    size_type size(const void *ptr) const;
0209 
0210    //!Allocates aligned bytes, returns 0 if there is no more memory.
0211    //!Alignment must be a power of 2
0212    void* allocate_aligned     (size_type nbytes, size_type alignment);
0213 
0214    private:
0215 
0216    //!Obtains the pointer returned to the user from the block control
0217    static void *priv_get_user_buffer(const block_ctrl *block);
0218 
0219    //!Obtains the block control structure of the user buffer
0220    static block_ctrl *priv_get_block(const void *ptr);
0221 
0222    //!Real allocation algorithm with min allocation option
0223    void * priv_allocate(boost::interprocess::allocation_type command
0224                         ,size_type min_size
0225                         ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr);
0226 
0227    void * priv_allocation_command(boost::interprocess::allocation_type command
0228                                  ,size_type min_size
0229                                  ,size_type &prefer_in_recvd_out_size
0230                                  ,void *&reuse_ptr
0231                                  ,size_type sizeof_object);
0232 
0233    //!Returns the number of total units that a user buffer
0234    //!of "userbytes" bytes really occupies (including header)
0235    static size_type priv_get_total_units(size_type userbytes);
0236 
0237    static size_type priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes);
0238    size_type priv_block_end_offset() const;
0239 
0240    //!Returns next block if it's free.
0241    //!Returns 0 if next block is not free.
0242    block_ctrl *priv_next_block_if_free(block_ctrl *ptr);
0243 
0244    //!Checks if this block is allocated (not free)
0245    bool priv_is_allocated_block(block_ctrl *ptr);
0246 
0247    //!Returns the previous block if it's free.
0248    //!Returns 0 if the previous block is not free.
0249    std::pair<block_ctrl*, block_ctrl*> priv_prev_block_if_free(block_ctrl *ptr);
0250 
0251    //!Real expand function implementation
0252    bool priv_expand(void *ptr, size_type min_size, size_type &prefer_in_recvd_out_size);
0253 
0254    //!Real expand to both sides implementation
0255    void* priv_expand_both_sides(boost::interprocess::allocation_type command
0256                                ,size_type min_size, size_type &prefer_in_recvd_out_size
0257                                ,void *reuse_ptr
0258                                ,bool only_preferred_backwards);
0259 
0260    //!Real private aligned allocation function
0261    //void* priv_allocate_aligned     (size_type nbytes, size_type alignment);
0262 
0263    //!Checks if the block has enough memory and splits/unlinks the block
0264    //!returning the address to the user
0265    void* priv_check_and_allocate(size_type units
0266                                 ,block_ctrl* prev
0267                                 ,block_ctrl* block
0268                                 ,size_type &received_size);
0269    //!Real deallocation algorithm
0270    void priv_deallocate(void *addr);
0271 
0272    //!Makes a new memory portion available for allocation
0273    void priv_add_segment(void *addr, size_type size);
0274 
0275    void priv_mark_new_allocated_block(block_ctrl *block);
0276 
0277    public:
0278    static const size_type Alignment      = ::boost::container::dtl::alignment_of
0279       < ::boost::container::dtl::max_align_t>::value;
0280    private:
0281    static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
0282    static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
0283    static const size_type AllocatedCtrlBytes = BlockCtrlBytes;
0284    static const size_type AllocatedCtrlUnits = BlockCtrlUnits;
0285    static const size_type UsableByPreviousChunk = 0;
0286 
0287    public:
0288    static const size_type PayloadPerAllocation = BlockCtrlBytes;
0289 };
0290 
0291 template<class MutexFamily, class VoidPointer>
0292 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0293 simple_seq_fit_impl<MutexFamily, VoidPointer>
0294    ::priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes)
0295 {
0296    //First align "this" pointer
0297    size_type uint_this         = (std::size_t)this_ptr;
0298    size_type uint_aligned_this = uint_this/Alignment*Alignment;
0299    size_type this_disalignment = (uint_this - uint_aligned_this);
0300    size_type block1_off =
0301       ipcdetail::get_rounded_size(sizeof(simple_seq_fit_impl) + extra_hdr_bytes + this_disalignment, Alignment)
0302       - this_disalignment;
0303    algo_impl_t::assert_alignment(this_disalignment + block1_off);
0304    return block1_off;
0305 }
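//A worked numeric example of the rounding above (all concrete values are
//illustrative assumptions): with Alignment == 16, extra_hdr_bytes == 4,
//sizeof(simple_seq_fit_impl) == 72 and a 16-byte aligned "this" pointer
//(this_disalignment == 0), block1_off == get_rounded_size(76, 16) == 80,
//so the first block starts 80 bytes past "this" and is itself Alignment-aligned.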
0306 
0307 template<class MutexFamily, class VoidPointer>
0308 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0309 simple_seq_fit_impl<MutexFamily, VoidPointer>
0310    ::priv_block_end_offset() const
0311 {
0312    //First align "this" pointer
0313    size_type uint_this         = (std::size_t)this;
0314    size_type uint_aligned_this = uint_this/Alignment*Alignment;
0315    size_type this_disalignment = (uint_this - uint_aligned_this);
0316    size_type old_end =
0317       ipcdetail::get_truncated_size(m_header.m_size + this_disalignment, Alignment)
0318       - this_disalignment;
0319    algo_impl_t::assert_alignment(old_end + this_disalignment);
0320    return old_end;
0321 }
0322 
0323 template<class MutexFamily, class VoidPointer>
0324 inline simple_seq_fit_impl<MutexFamily, VoidPointer>::
0325    simple_seq_fit_impl(size_type segment_size, size_type extra_hdr_bytes)
0326 {
0327    //Initialize sizes and counters
0328    m_header.m_allocated = 0;
0329    m_header.m_size      = segment_size;
0330    m_header.m_extra_hdr_bytes = extra_hdr_bytes;
0331 
0332    //Initialize pointers
0333    size_type block1_off = priv_first_block_offset(this, extra_hdr_bytes);
0334 
0335    m_header.m_root.m_next  = move_detail::force_ptr<block_ctrl*>
0336       ((reinterpret_cast<char*>(this) + block1_off));
0337    algo_impl_t::assert_alignment(ipcdetail::to_raw_pointer(m_header.m_root.m_next));
0338    m_header.m_root.m_next->m_size  = (segment_size - block1_off)/Alignment;
0339    m_header.m_root.m_next->m_next  = &m_header.m_root;
0340 }
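//After construction the segment managed by the code above is laid out roughly
//as follows (sketch only, offsets relative to "this"):
//
//   [ simple_seq_fit_impl | extra_hdr_bytes | padding up to block1_off |
//     block_ctrl of the single free block | free space ... end of segment ]
//
//m_header.m_root acts as a sentinel: its m_next points to that single free
//block, whose m_next points back to &m_header.m_root, forming the circular
//singly linked free list that the allocation functions walk.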
0341 
0342 template<class MutexFamily, class VoidPointer>
0343 inline simple_seq_fit_impl<MutexFamily, VoidPointer>::~simple_seq_fit_impl()
0344 {
0345    //There is a memory leak!
0346 //   BOOST_ASSERT(m_header.m_allocated == 0);
0347 //   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
0348 }
0349 
0350 template<class MutexFamily, class VoidPointer>
0351 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::grow(size_type extra_size)
0352 {
0353    //Old highest address block's end offset
0354    size_type old_end = this->priv_block_end_offset();
0355 
0356    //Update managed buffer's size
0357    m_header.m_size += extra_size;
0358 
0359    //We need at least BlockCtrlBytes spare bytes to create a new free block
0360    if((m_header.m_size - old_end) < BlockCtrlBytes){
0361       return;
0362    }
0363 
0364    //We'll create a new free block with extra_size bytes
0365 
0366    block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
0367       (reinterpret_cast<char*>(this) + old_end);
0368 
0369    algo_impl_t::assert_alignment(new_block);
0370    new_block->m_next = 0;
0371    new_block->m_size = (m_header.m_size - old_end)/Alignment;
0372    m_header.m_allocated += new_block->m_size*Alignment;
0373    this->priv_deallocate(priv_get_user_buffer(new_block));
0374 }
0375 
0376 template<class MutexFamily, class VoidPointer>
0377 void simple_seq_fit_impl<MutexFamily, VoidPointer>::shrink_to_fit()
0378 {
0379    //Get the root and the first memory block
0380    block_ctrl *prev                 = &m_header.m_root;
0381    block_ctrl *last                 = &m_header.m_root;
0382    block_ctrl *block                = ipcdetail::to_raw_pointer(last->m_next);
0383    block_ctrl *root                 = &m_header.m_root;
0384 
0385    //No free block?
0386    if(block == root) return;
0387 
0388    //Iterate through the free block list
0389    while(block != root){
0390       prev  = last;
0391       last  = block;
0392       block = ipcdetail::to_raw_pointer(block->m_next);
0393    }
0394 
0395    char *last_free_end_address   = reinterpret_cast<char*>(last) + last->m_size*Alignment;
0396    if(last_free_end_address != (reinterpret_cast<char*>(this) + priv_block_end_offset())){
0397       //there is an allocated block at the end of the segment
0398       //so no shrinking is possible
0399       return;
0400    }
0401 
0402    //Check if we have only one big free block
0403    void *unique_block = 0;
0404    if(!m_header.m_allocated){
0405       BOOST_ASSERT(prev == root);
0406       size_type ignore_recvd = 0;
0407       void *ignore_reuse = 0;
0408       unique_block = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
0409       if(!unique_block)
0410          return;
0411       last = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
0412       BOOST_ASSERT(last_free_end_address == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
0413    }
0414    size_type last_units = last->m_size;
0415 
0416    size_type received_size;
0417    void *addr = priv_check_and_allocate(last_units, prev, last, received_size);
0418    (void)addr;
0419    BOOST_ASSERT(addr);
0420    BOOST_ASSERT(received_size == last_units*Alignment - AllocatedCtrlBytes);
0421 
0422    //Shrink it
0423    m_header.m_size /= Alignment;
0424    m_header.m_size -= last->m_size;
0425    m_header.m_size *= Alignment;
0426    m_header.m_allocated -= last->m_size*Alignment;
0427 
0428    if(unique_block)
0429       priv_deallocate(unique_block);
0430 }
0431 
0432 template<class MutexFamily, class VoidPointer>
0433 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
0434    priv_mark_new_allocated_block(block_ctrl *new_block)
0435 {
0436    new_block->m_next = 0;
0437 }
0438 
0439 template<class MutexFamily, class VoidPointer>
0440 inline
0441 typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
0442    simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
0443 {
0444    return const_cast<block_ctrl*>(move_detail::force_ptr<const block_ctrl*>
0445       (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
0446 }
0447 
0448 template<class MutexFamily, class VoidPointer>
0449 inline
0450 void *simple_seq_fit_impl<MutexFamily, VoidPointer>::
0451       priv_get_user_buffer(const typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
0452 {
0453    return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
0454 }
0455 
0456 template<class MutexFamily, class VoidPointer>
0457 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_add_segment(void *addr, size_type segment_size)
0458 {
0459    algo_impl_t::assert_alignment(addr);
0460    //Check size
0461    BOOST_ASSERT(!(segment_size < BlockCtrlBytes));
0462    if(segment_size < BlockCtrlBytes)
0463       return;
0464    //Construct big block using the new segment
0465    block_ctrl *new_block   = static_cast<block_ctrl *>(addr);
0466    new_block->m_size       = segment_size/Alignment;
0467    new_block->m_next       = 0;
0468    //Simulate that this block was previously allocated
0469    m_header.m_allocated   += new_block->m_size*Alignment;
0470    //Return block and insert it in the free block list
0471    this->priv_deallocate(priv_get_user_buffer(new_block));
0472 }
0473 
0474 template<class MutexFamily, class VoidPointer>
0475 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0476 simple_seq_fit_impl<MutexFamily, VoidPointer>::get_size()  const
0477    {  return m_header.m_size;  }
0478 
0479 template<class MutexFamily, class VoidPointer>
0480 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0481 simple_seq_fit_impl<MutexFamily, VoidPointer>::get_free_memory()  const
0482 {
0483    return m_header.m_size - m_header.m_allocated -
0484       algo_impl_t::multiple_of_units(sizeof(*this) + m_header.m_extra_hdr_bytes);
0485 }
0486 
0487 template<class MutexFamily, class VoidPointer>
0488 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0489 simple_seq_fit_impl<MutexFamily, VoidPointer>::
0490    get_min_size (size_type extra_hdr_bytes)
0491 {
0492    return ipcdetail::get_rounded_size((size_type)sizeof(simple_seq_fit_impl),Alignment) +
0493           ipcdetail::get_rounded_size(extra_hdr_bytes,Alignment)
0494           + BlockCtrlBytes;
0495 }
0496 
0497 template<class MutexFamily, class VoidPointer>
0498 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
0499     all_memory_deallocated()
0500 {
0501    //-----------------------
0502    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0503    //-----------------------
0504    return m_header.m_allocated == 0 &&
0505           ipcdetail::to_raw_pointer(m_header.m_root.m_next->m_next) == &m_header.m_root;
0506 }
0507 
0508 template<class MutexFamily, class VoidPointer>
0509 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::zero_free_memory()
0510 {
0511    //-----------------------
0512    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0513    //-----------------------
0514    block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
0515 
0516    //Iterate through all free portions
0517    do{
0518       //Just clear the memory part reserved for the user
0519       std::memset( priv_get_user_buffer(block)
0520                  , 0
0521              , block->get_user_bytes());
0522       block = ipcdetail::to_raw_pointer(block->m_next);
0523    }
0524    while(block != &m_header.m_root);
0525 }
0526 
0527 template<class MutexFamily, class VoidPointer>
0528 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
0529     check_sanity()
0530 {
0531    //-----------------------
0532    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0533    //-----------------------
0534    block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
0535 
0536    size_type free_memory = 0;
0537 
0538    //Iterate through all blocks obtaining their size
0539    while(block != &m_header.m_root){
0540       algo_impl_t::assert_alignment(block);
0541       if(!algo_impl_t::check_alignment(block))
0542          return false;
0543       //A free block's next pointer must always be valid
0544       block_ctrl *next = ipcdetail::to_raw_pointer(block->m_next);
0545       if(!next){
0546          return false;
0547       }
0548       free_memory += block->m_size*Alignment;
0549       block = next;
0550    }
0551 
0552    //Check that allocated bytes do not exceed the segment size
0553    if(m_header.m_allocated > m_header.m_size){
0554       return false;
0555    }
0556 
0557    //Check that free bytes do not exceed the segment size
0558    if(free_memory > m_header.m_size){
0559       return false;
0560    }
0561    return true;
0562 }
0563 
0564 template<class MutexFamily, class VoidPointer>
0565 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0566    allocate(size_type nbytes)
0567 {
0568    //-----------------------
0569    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0570    //-----------------------
0571    size_type ignore_recvd = nbytes;
0572    void *ignore_reuse = 0;
0573    return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
0574 }
0575 
0576 template<class MutexFamily, class VoidPointer>
0577 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0578    allocate_aligned(size_type nbytes, size_type alignment)
0579 {
0580    //-----------------------
0581    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0582    //-----------------------
0583    return algo_impl_t::
0584       allocate_aligned(this, nbytes, alignment);
0585 }
0586 
0587 template<class MutexFamily, class VoidPointer>
0588 template<class T>
0589 inline T* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0590    allocation_command  (boost::interprocess::allocation_type command,   size_type limit_size,
0591                         size_type &prefer_in_recvd_out_size, T *&reuse_ptr)
0592 {
0593    void *raw_reuse = reuse_ptr;
0594    void * const ret = priv_allocation_command
0595       (command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
0596    BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
0597    reuse_ptr = static_cast<T*>(raw_reuse);
0598    return static_cast<T*>(ret);
0599 }
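//A hedged usage sketch of the command interface defined above (the caller,
//flag combination and sizes are illustrative assumptions, not a contract of
//this header):
//
//   using namespace boost::interprocess;
//   typedef ipcdetail::simple_seq_fit_impl<mutex_family, offset_ptr<void> > algo_t;
//
//   //algo_t &algo = ...;                    //algorithm living inside the segment
//   //algo_t::size_type prefer_recvd = 200;  //in: preferred elements, out: received
//   //int *reuse = old_buffer;               //buffer we would like to grow in place
//   //int *p = algo.allocation_command<int>(
//   //     expand_fwd | allocate_new         //try to expand, else allocate a new buffer
//   //   , 100                               //limit_size: minimum acceptable elements
//   //   , prefer_recvd
//   //   , reuse);
//   //if(p && p == reuse){ /*expanded in place; prefer_recvd holds the received count*/ }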
0600 
0601 template<class MutexFamily, class VoidPointer>
0602 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0603    raw_allocation_command  (boost::interprocess::allocation_type command, size_type limit_objects,
0604                         size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
0605 {
0606    size_type const preferred_objects = prefer_in_recvd_out_size;
0607    if(!sizeof_object){
0608       return reuse_ptr = 0, static_cast<void*>(0);
0609   }
0610    if(command & boost::interprocess::try_shrink_in_place){
0611       if(!reuse_ptr) return static_cast<void*>(0);
0612       prefer_in_recvd_out_size = preferred_objects*sizeof_object;
0613       bool success = algo_impl_t::try_shrink
0614          ( this, reuse_ptr, limit_objects*sizeof_object, prefer_in_recvd_out_size);
0615       prefer_in_recvd_out_size /= sizeof_object;
0616       return success ? reuse_ptr : 0;
0617    }
0618    else{
0619       return priv_allocation_command
0620          (command, limit_objects, prefer_in_recvd_out_size, reuse_ptr, sizeof_object);
0621    }
0622 }
0623 
0624 template<class MutexFamily, class VoidPointer>
0625 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0626    priv_allocation_command (boost::interprocess::allocation_type command,   size_type limit_size,
0627                        size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
0628 {
0629    size_type const preferred_size = prefer_in_recvd_out_size;
0630    command &= ~boost::interprocess::expand_bwd;
0631    if(!command){
0632       return reuse_ptr = 0, static_cast<void*>(0);
0633    }
0634 
0635    size_type max_count = m_header.m_size/sizeof_object;
0636    if(limit_size > max_count || preferred_size > max_count){
0637       return reuse_ptr = 0, static_cast<void*>(0);
0638    }
0639    size_type l_size = limit_size*sizeof_object;
0640    size_type r_size = preferred_size*sizeof_object;
0641    void *ret = 0;
0642    {
0643       //-----------------------
0644       boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0645       //-----------------------
0646       ret = priv_allocate(command, l_size, r_size, reuse_ptr);
0647    }
0648    prefer_in_recvd_out_size = r_size/sizeof_object;
0649    return ret;
0650 }
0651 
0652 template<class MutexFamily, class VoidPointer>
0653 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0654 simple_seq_fit_impl<MutexFamily, VoidPointer>::size(const void *ptr) const
0655 {
0656    //We need no synchronization since this block is not going
0657    //to be modified
0658    //Obtain the real size of the block
0659    const block_ctrl *block = static_cast<const block_ctrl*>(priv_get_block(ptr));
0660    return block->get_user_bytes();
0661 }
0662 
0663 template<class MutexFamily, class VoidPointer>
0664 void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
0665    priv_expand_both_sides(boost::interprocess::allocation_type command
0666                          ,size_type min_size
0667                          ,size_type &prefer_in_recvd_out_size
0668                          ,void *reuse_ptr
0669                          ,bool only_preferred_backwards)
0670 {
0671    size_type const preferred_size = prefer_in_recvd_out_size;
0672    typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
0673    block_ctrl *reuse = priv_get_block(reuse_ptr);
0674    prefer_in_recvd_out_size = 0;
0675 
0676    if(this->size(reuse_ptr) > min_size){
0677       prefer_in_recvd_out_size = this->size(reuse_ptr);
0678       return reuse_ptr;
0679    }
0680 
0681    if(command & boost::interprocess::expand_fwd){
0682       if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
0683          return reuse_ptr;
0684    }
0685    else{
0686       prefer_in_recvd_out_size = this->size(reuse_ptr);
0687    }
0688    if(command & boost::interprocess::expand_bwd){
0689       size_type extra_forward = !prefer_in_recvd_out_size ? 0 : prefer_in_recvd_out_size + BlockCtrlBytes;
0690       prev_block_t prev_pair = priv_prev_block_if_free(reuse);
0691       block_ctrl *prev = prev_pair.second;
0692       if(!prev){
0693          return 0;
0694       }
0695 
0696       size_type needs_backwards =
0697          ipcdetail::get_rounded_size(preferred_size - extra_forward, Alignment);
0698 
0699       if(!only_preferred_backwards){
0700          needs_backwards = max_value(ipcdetail::get_rounded_size(min_size - extra_forward, Alignment)
0701                   ,min_value(prev->get_user_bytes(), needs_backwards));
0702       }
0703 
0704       //Check if previous block has enough size
0705       if((prev->get_user_bytes()) >=  needs_backwards){
0706          //Now take all next space. This will succeed
0707          if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, prefer_in_recvd_out_size)){
0708             BOOST_ASSERT(0);
0709          }
0710 
0711          //We need a minimum size to split the previous one
0712          if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
0713              block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
0714                   (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);
0715 
0716             new_block->m_next = 0;
0717             new_block->m_size =
0718                BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
0719             prev->m_size =
0720                (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
0721             prefer_in_recvd_out_size = needs_backwards + extra_forward;
0722             m_header.m_allocated += needs_backwards + BlockCtrlBytes;
0723             return priv_get_user_buffer(new_block);
0724          }
0725          else{
0726             //Just merge the whole previous block
0727             block_ctrl *prev_2_block = prev_pair.first;
0728             //Update received size and allocation
0729             prefer_in_recvd_out_size = extra_forward + prev->get_user_bytes();
0730             m_header.m_allocated += prev->get_total_bytes();
0731             //Now unlink it from previous block
0732             prev_2_block->m_next = prev->m_next;
0733             prev->m_size = reuse->m_size + prev->m_size;
0734             prev->m_next = 0;
0735             return priv_get_user_buffer(prev);
0736          }
0737       }
0738    }
0739    return 0;
0740 }
0741 
0742 template<class MutexFamily, class VoidPointer>
0743 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
0744    deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain &chain)
0745 {
0746    //-----------------------
0747    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
0748    //-----------------------
0749    while(!chain.empty()){
0750       this->priv_deallocate(to_raw_pointer(chain.pop_front()));
0751    }
0752 }
0753 
0754 template<class MutexFamily, class VoidPointer>
0755 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
0756 simple_seq_fit_impl<MutexFamily, VoidPointer>::
0757    priv_get_total_units(size_type userbytes)
0758 {
0759    size_type s = ipcdetail::get_rounded_size(userbytes, Alignment)/Alignment;
0760    if(!s)   ++s;
0761    return BlockCtrlUnits + s;
0762 }
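//A worked example of the unit computation above (Alignment and BlockCtrlUnits
//values are illustrative assumptions): with Alignment == 16 and
//BlockCtrlUnits == 1, a request for userbytes == 20 gives
//   s == get_rounded_size(20, 16)/16 == 32/16 == 2
//so priv_get_total_units returns 1 + 2 == 3 units (48 bytes including the
//block_ctrl header). A zero-byte request still consumes one data unit.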
0763 
0764 template<class MutexFamily, class VoidPointer>
0765 void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
0766    priv_allocate(boost::interprocess::allocation_type command
0767                 ,size_type limit_size, size_type &prefer_in_recvd_out_size, void *&reuse_ptr)
0768 {
0769    size_type const preferred_size = prefer_in_recvd_out_size;
0770    if(command & boost::interprocess::shrink_in_place){
0771       if(!reuse_ptr)  return static_cast<void*>(0);
0772       bool success = algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size);
0773       return success ? reuse_ptr : 0;
0774    }
0775    prefer_in_recvd_out_size = 0;
0776 
0777    if(limit_size > preferred_size){
0778       return reuse_ptr = 0, static_cast<void*>(0);
0779    }
0780 
0781    //Number of units to request (including block_ctrl header)
0782    size_type nunits = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlUnits;
0783 
0784    //Get the root and the first memory block
0785    block_ctrl *prev                 = &m_header.m_root;
0786    block_ctrl *block                = ipcdetail::to_raw_pointer(prev->m_next);
0787    block_ctrl *root                 = &m_header.m_root;
0788    block_ctrl *biggest_block        = 0;
0789    block_ctrl *prev_biggest_block   = 0;
0790    size_type biggest_size         = 0;
0791 
0792    //Expand in place
0793    if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
0794       void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, true);
0795       if(ret){
0796          algo_impl_t::assert_alignment(ret);
0797          return ret;
0798       }
0799    }
0800 
0801    if(command & boost::interprocess::allocate_new){
0802       prefer_in_recvd_out_size = 0;
0803       while(block != root){
0804          //Update biggest block pointers
0805          if(block->m_size > biggest_size){
0806             prev_biggest_block = prev;
0807             biggest_size  = block->m_size;
0808             biggest_block = block;
0809          }
0810          algo_impl_t::assert_alignment(block);
0811          void *addr = this->priv_check_and_allocate(nunits, prev, block, prefer_in_recvd_out_size);
0812          if(addr){
0813             algo_impl_t::assert_alignment(addr);
0814             return reuse_ptr = 0, addr;
0815          }
0816          //Bad luck, let's check next block
0817          prev  = block;
0818          block = ipcdetail::to_raw_pointer(block->m_next);
0819       }
0820 
0821       //No luck finding preferred_size; if we found a biggest_block,
0822       //try with that block
0823       if(biggest_block){
0824          size_type limit_units = ipcdetail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlUnits;
0825          if(biggest_block->m_size < limit_units){
0826             return reuse_ptr = 0, static_cast<void*>(0);
0827          }
0828          void *ret = this->priv_check_and_allocate
0829             (biggest_block->m_size, prev_biggest_block, biggest_block, prefer_in_recvd_out_size = biggest_block->m_size*Alignment - BlockCtrlUnits);
0830          BOOST_ASSERT(ret != 0);
0831          algo_impl_t::assert_alignment(ret);
0832          return reuse_ptr = 0, ret;
0833       }
0834    }
0835    //Now try to expand both sides with min size
0836    if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
0837       void *ret = priv_expand_both_sides (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false);
0838       algo_impl_t::assert_alignment(ret);
0839       return ret;
0840    }
0841    return reuse_ptr = 0, static_cast<void*>(0);
0842 }
0843 
0844 template<class MutexFamily, class VoidPointer> inline
0845 bool simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_is_allocated_block
0846       (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
0847 {  return block->m_next == 0;  }
0848 
0849 template<class MutexFamily, class VoidPointer>
0850 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
0851    simple_seq_fit_impl<MutexFamily, VoidPointer>::
0852       priv_next_block_if_free
0853          (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
0854 {
0855    //Take the address where the next block should go
0856    block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
0857       (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
0858 
0859    //Check if the adjacent block is in the managed segment
0860    char *this_char_ptr = reinterpret_cast<char*>(this);
0861    char *next_char_ptr = reinterpret_cast<char*>(next_block);
0862    size_type distance = (size_type)(next_char_ptr - this_char_ptr)/Alignment;
0863 
0864    if(distance >= (m_header.m_size/Alignment)){
0865       //"next_block" does not exist so we can't expand "block"
0866       return 0;
0867    }
0868 
0869    if(!next_block->m_next)
0870       return 0;
0871 
0872    return next_block;
0873 }
0874 
0875 template<class MutexFamily, class VoidPointer>
0876 inline
0877    std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
0878             ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
0879    simple_seq_fit_impl<MutexFamily, VoidPointer>::
0880       priv_prev_block_if_free
0881          (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
0882 {
0883    typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;
0884    //Take the address where the previous block should go
0885    block_ctrl *root           = &m_header.m_root;
0886    block_ctrl *prev_2_block   = root;
0887    block_ctrl *prev_block = ipcdetail::to_raw_pointer(root->m_next);
0888 
0889    while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)
0890             != reinterpret_cast<char*>(ptr)
0891          && prev_block != root){
0892       prev_2_block = prev_block;
0893       prev_block = ipcdetail::to_raw_pointer(prev_block->m_next);
0894    }
0895 
0896    if(prev_block == root || !prev_block->m_next)
0897       return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
0898 
0899    //Check if the previous block is in the managed segment
0900    char *this_char_ptr = reinterpret_cast<char*>(this);
0901    char *prev_char_ptr = reinterpret_cast<char*>(prev_block);
0902    size_type distance = (size_type)(prev_char_ptr - this_char_ptr)/Alignment;
0903 
0904    if(distance >= (m_header.m_size/Alignment)){
0905       //"previous_block" does not exist so we can't expand "block"
0906       return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
0907    }
0908    return prev_pair_t(prev_2_block, prev_block);
0909 }
0910 
0911 
0912 template<class MutexFamily, class VoidPointer>
0913 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
0914    priv_expand (void *ptr, size_type min_size, size_type &received_size)
0915 {
0916    size_type preferred_size = received_size;
0917    //Obtain the real size of the block
0918    block_ctrl *block = move_detail::force_ptr<block_ctrl*>(priv_get_block(ptr));
0919    size_type old_block_size = block->m_size;
0920 
0921    //All used blocks' next is marked with 0 so check it
0922    BOOST_ASSERT(block->m_next == 0);
0923 
0924    //Put this to a safe value
0925    received_size = old_block_size*Alignment - BlockCtrlBytes;
0926 
0927    //Now translate it to Alignment units
0928    min_size       = ipcdetail::get_rounded_size(min_size, Alignment)/Alignment;
0929    preferred_size = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment;
0930 
0931    //Some parameter checks
0932    if(min_size > preferred_size)
0933       return false;
0934 
0935    size_type data_size = old_block_size - BlockCtrlUnits;
0936 
0937    if(data_size >= min_size)
0938       return true;
0939 
0940    block_ctrl *next_block = priv_next_block_if_free(block);
0941    if(!next_block){
0942       return false;
0943    }
0944 
0945    //Is "block" + "next_block" big enough?
0946    size_type merged_size = old_block_size + next_block->m_size;
0947 
0948    //Now we can expand this block further than before
0949    received_size = merged_size*Alignment - BlockCtrlBytes;
0950 
0951    if(merged_size < (min_size + BlockCtrlUnits)){
0952       return false;
0953    }
0954 
0955    //We can fully expand. Merge both blocks.
0956    block->m_next = next_block->m_next;
0957    block->m_size = merged_size;
0958 
0959    //Find the previous free block of next_block
0960    block_ctrl *prev = &m_header.m_root;
0961    while(ipcdetail::to_raw_pointer(prev->m_next) != next_block){
0962       prev = ipcdetail::to_raw_pointer(prev->m_next);
0963    }
0964 
0965    //Now insert merged block in the free list
0966    //This allows reusing allocation logic in this function
0967    m_header.m_allocated -= old_block_size*Alignment;
0968    prev->m_next = block;
0969 
0970    //Now use check and allocate to do the allocation logic
0971    preferred_size += BlockCtrlUnits;
0972    size_type nunits = preferred_size < merged_size ? preferred_size : merged_size;
0973 
0974    //This must succeed since nunits is not greater than merged_size!
0975    if(!this->priv_check_and_allocate (nunits, prev, block, received_size)){
0976       //Something very ugly is happening here. This is a bug
0977       //or there is memory corruption
0978       BOOST_ASSERT(0);
0979       return false;
0980    }
0981    return true;
0982 }
0983 
0984 template<class MutexFamily, class VoidPointer> inline
0985 void* simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_check_and_allocate
0986    (size_type nunits
0987    ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* prev
0988    ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* block
0989    ,size_type &received_size)
0990 {
0991    size_type upper_nunits = nunits + BlockCtrlUnits;
0992    bool found = false;
0993 
0994    if (block->m_size > upper_nunits){
0995       //This block is bigger than needed: split it in
0996       //two blocks; the first's size will be "nunits",
0997       //the second's size will be "block->m_size - nunits"
0998       size_type total_size = block->m_size;
0999       block->m_size  = nunits;
1000 
1001       block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
1002          (reinterpret_cast<char*>(block) + Alignment*nunits);
1003       new_block->m_size  = total_size - nunits;
1004       new_block->m_next  = block->m_next;
1005       prev->m_next = new_block;
1006       found = true;
1007    }
1008    else if (block->m_size >= nunits){
1009       //This block has exactly the right size, with some
1010       //extra unusable bytes.
1011       prev->m_next = block->m_next;
1012       found = true;
1013    }
1014 
1015    if(found){
1016       //We need the block_ctrl header for deallocation, so
1017       //only return the memory the user can overwrite
1018       m_header.m_allocated += block->m_size*Alignment;
1019       received_size =  block->get_user_bytes();
1020       //Mark the block as allocated
1021       block->m_next = 0;
1022       //Check alignment
1023       algo_impl_t::assert_alignment(block);
1024       return priv_get_user_buffer(block);
1025    }
1026    return 0;
1027 }
1028 
1029 template<class MutexFamily, class VoidPointer>
1030 void simple_seq_fit_impl<MutexFamily, VoidPointer>::deallocate(void* addr)
1031 {
1032    if(!addr)   return;
1033    //-----------------------
1034    boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
1035    //-----------------------
1036    return this->priv_deallocate(addr);
1037 }
1038 
1039 template<class MutexFamily, class VoidPointer>
1040 void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
1041 {
1042    if(!addr)   return;
1043 
1044    //Let's get free block list. List is always sorted
1045    //by memory address to allow block merging.
1046    //Pointer next always points to the first
1047    //(lower address) block
1048    block_ctrl * prev  = &m_header.m_root;
1049    block_ctrl * pos   = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
1050    block_ctrl * block = move_detail::force_ptr<block_ctrl*>(priv_get_block(addr));
1051 
1052    //All used blocks' next is marked with 0 so check it
1053    BOOST_ASSERT(block->m_next == 0);
1054 
1055    //Check if alignment and block size are right
1056    algo_impl_t::assert_alignment(addr);
1057 
1058    size_type total_size = Alignment*block->m_size;
1059    BOOST_ASSERT(m_header.m_allocated >= total_size);
1060 
1061    //Update used memory count
1062    m_header.m_allocated -= total_size;
1063 
1064    //Let's find the previous and the next block of the block to deallocate
1065    //This ordering comparison must be done with original pointers
1066    //types since their mapping to raw pointers can be different
1067    //in each process
1068    while((ipcdetail::to_raw_pointer(pos) != &m_header.m_root) && (block > pos)){
1069       prev = pos;
1070       pos = ipcdetail::to_raw_pointer(pos->m_next);
1071    }
1072 
1073    //Try to combine with upper block
1074    char *block_char_ptr = reinterpret_cast<char*>(ipcdetail::to_raw_pointer(block));
1075 
1076    if ((block_char_ptr + Alignment*block->m_size) ==
1077          reinterpret_cast<char*>(ipcdetail::to_raw_pointer(pos))){
1078       block->m_size += pos->m_size;
1079       block->m_next  = pos->m_next;
1080    }
1081    else{
1082       block->m_next = pos;
1083    }
1084 
1085    //Try to combine with lower block
1086    if ((reinterpret_cast<char*>(ipcdetail::to_raw_pointer(prev))
1087             + Alignment*prev->m_size) ==
1088         block_char_ptr){
1089 
1090 
1091       prev->m_size += block->m_size;
1092       prev->m_next  = block->m_next;
1093    }
1094    else{
1095       prev->m_next = block;
1096    }
1097 }
1098 
1099 }  //namespace ipcdetail {
1100 
1101 }  //namespace interprocess {
1102 
1103 }  //namespace boost {
1104 
1105 #include <boost/interprocess/detail/config_end.hpp>
1106 
1107 #endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
1108