0001 //////////////////////////////////////////////////////////////////////////////
0002 //
0003 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
0004 // Software License, Version 1.0. (See accompanying file
0005 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
0006 //
0007 // See http://www.boost.org/libs/interprocess for documentation.
0008 //
0009 //////////////////////////////////////////////////////////////////////////////
0010 
0011 #ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
0012 #define BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
0013 
0014 #ifndef BOOST_CONFIG_HPP
0015 #  include <boost/config.hpp>
0016 #endif
0017 #
0018 #if defined(BOOST_HAS_PRAGMA_ONCE)
0019 #  pragma once
0020 #endif
0021 
0022 #include <boost/interprocess/detail/config_begin.hpp>
0023 #include <boost/interprocess/detail/workaround.hpp>
0024 
0025 // interprocess
0026 #include <boost/interprocess/interprocess_fwd.hpp>
0027 #include <boost/interprocess/containers/allocation_type.hpp>
0028 // interprocess/detail
0029 #include <boost/interprocess/detail/math_functions.hpp>
0030 #include <boost/interprocess/detail/min_max.hpp>
0031 #include <boost/interprocess/detail/type_traits.hpp>
0032 #include <boost/interprocess/detail/utilities.hpp>
0033 // container/detail
0034 #include <boost/container/detail/multiallocation_chain.hpp>
0035 #include <boost/container/detail/placement_new.hpp>
0036 // move
0037 #include <boost/move/utility_core.hpp>
0038 // move/detail
0039 #include <boost/move/detail/force_ptr.hpp>
0040 // other boost
0041 #include <boost/static_assert.hpp>
0042 #include <boost/assert.hpp>
0043 
0044 //!\file
0045 //!Implements common operations for memory algorithms.
0046 
0047 namespace boost {
0048 namespace interprocess {
0049 namespace ipcdetail {
0050 
0051 template<class VoidPointer>
0052 class basic_multiallocation_chain
0053    : public boost::container::dtl::
0054       basic_multiallocation_chain<VoidPointer>
0055 {
0056    BOOST_MOVABLE_BUT_NOT_COPYABLE(basic_multiallocation_chain)
0057    typedef boost::container::dtl::
0058       basic_multiallocation_chain<VoidPointer> base_t;
0059    public:
0060 
0061    basic_multiallocation_chain()
0062       :  base_t()
0063    {}
0064 
0065    basic_multiallocation_chain(BOOST_RV_REF(basic_multiallocation_chain) other)
0066       :  base_t(::boost::move(static_cast<base_t&>(other)))
0067    {}
0068 
0069    basic_multiallocation_chain& operator=(BOOST_RV_REF(basic_multiallocation_chain) other)
0070    {
0071       this->base_t::operator=(::boost::move(static_cast<base_t&>(other)));
0072       return *this;
0073    }
0074 
0075    void *pop_front()
0076    {
0077       return boost::interprocess::ipcdetail::to_raw_pointer(this->base_t::pop_front());
0078    }
0079 };
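//Note: the wrapper above only forwards move construction/assignment to the
//boost::container chain and adds a pop_front() that returns a raw void*
//(converted from the stored void_pointer via to_raw_pointer).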
0080 
0081 
0082 //!This class implements several allocation functions shared by different algorithms
0083 //!(aligned allocation, multiple allocation...).
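//(MemoryAlgorithm is expected to provide the nested types and constants used
//below -- void_pointer, block_ctrl, multiallocation_chain, size_type,
//Alignment, MinBlockUnits, ... -- as well as the priv_* operations that these
//helpers call. See the illustrative usage sketch after the class definition.)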
0084 template<class MemoryAlgorithm>
0085 class memory_algorithm_common
0086 {
0087    public:
0088    typedef typename MemoryAlgorithm::void_pointer              void_pointer;
0089    typedef typename MemoryAlgorithm::block_ctrl                block_ctrl;
0090    typedef typename MemoryAlgorithm::multiallocation_chain     multiallocation_chain;
0091    typedef memory_algorithm_common<MemoryAlgorithm>            this_type;
0092    typedef typename MemoryAlgorithm::size_type                 size_type;
0093 
0094    static const size_type Alignment              = MemoryAlgorithm::Alignment;
0095    static const size_type MinBlockUnits          = MemoryAlgorithm::MinBlockUnits;
0096    static const size_type AllocatedCtrlBytes     = MemoryAlgorithm::AllocatedCtrlBytes;
0097    static const size_type AllocatedCtrlUnits     = MemoryAlgorithm::AllocatedCtrlUnits;
0098    static const size_type BlockCtrlBytes         = MemoryAlgorithm::BlockCtrlBytes;
0099    static const size_type BlockCtrlUnits         = MemoryAlgorithm::BlockCtrlUnits;
0100    static const size_type UsableByPreviousChunk  = MemoryAlgorithm::UsableByPreviousChunk;
0101 
0102    static void assert_alignment(const void *ptr)
0103    {  assert_alignment((std::size_t)ptr); }
0104 
0105    static void assert_alignment(size_type uint_ptr)
0106    {
0107       (void)uint_ptr;
0108       BOOST_ASSERT(uint_ptr % Alignment == 0);
0109    }
0110 
0111    static bool check_alignment(const void *ptr)
0112    {  return (((std::size_t)ptr) % Alignment == 0);   }
0113 
0114    static size_type ceil_units(size_type size)
0115    {  return get_rounded_size(size, Alignment)/Alignment; }
0116 
0117    static size_type floor_units(size_type size)
0118    {  return size/Alignment;  }
0119 
0120    static size_type multiple_of_units(size_type size)
0121    {  return get_rounded_size(size, Alignment);  }
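   //Illustrative example (hypothetical value): assuming Alignment == 16,
   //ceil_units(30) == 2, floor_units(30) == 1 and multiple_of_units(30) == 32,
   //since get_rounded_size rounds up to the next multiple of Alignment.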
0122 
0123    static void allocate_many
0124       (MemoryAlgorithm *memory_algo, size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
0125    {
0126       return this_type::priv_allocate_many(memory_algo, &elem_bytes, n_elements, 0, chain);
0127    }
0128 
0129    static void deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
0130    {
0131       return this_type::priv_deallocate_many(memory_algo, chain);
0132    }
0133 
0134    static bool calculate_lcm_and_needs_backwards_lcmed
0135       (size_type backwards_multiple, size_type received_size, size_type size_to_achieve,
0136       size_type &lcm_out, size_type &needs_backwards_lcmed_out)
0137    {
0138       // Now calculate lcm_val
0139       size_type max = backwards_multiple;
0140       size_type min = Alignment;
0141       size_type needs_backwards;
0142       size_type needs_backwards_lcmed;
0143       size_type lcm_val;
0144       size_type current_forward;
0145       //Swap if necessary
0146       if(max < min){
0147          size_type tmp = min;
0148          min = max;
0149          max = tmp;
0150       }
0151       //Check if it's a power of two
0152       if((backwards_multiple & (backwards_multiple-1)) == 0){
0153          if(0 != (size_to_achieve & ((backwards_multiple-1)))){
0154             return false;
0155          }
0156 
0157          lcm_val = max;
0158          //If maxbytes can't be achieved and we settle for a buffer between
0159          //minbytes and maxbytes, calculate the biggest of all
0160          //possibilities
0161          current_forward = get_truncated_size_po2(received_size, backwards_multiple);
0162          needs_backwards = size_to_achieve - current_forward;
0163          BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
0164          needs_backwards_lcmed = get_rounded_size_po2(needs_backwards, lcm_val);
0165          lcm_out = lcm_val;
0166          needs_backwards_lcmed_out = needs_backwards_lcmed;
0167          return true;
0168       }
0169       //Check if it's a multiple of the alignment
0170       else if((backwards_multiple & (Alignment - 1u)) == 0){
0171          lcm_val = backwards_multiple;
0172          current_forward = get_truncated_size(received_size, backwards_multiple);
0173          //No need to round needs_backwards because backwards_multiple == lcm_val
0174          needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
0175          BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
0176          lcm_out = lcm_val;
0177          needs_backwards_lcmed_out = needs_backwards_lcmed;
0178          return true;
0179       }
0180       //Check if it's a multiple of half the alignment
0181       else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
0182          lcm_val = backwards_multiple*2u;
0183          current_forward = get_truncated_size(received_size, backwards_multiple);
0184          needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
0185          if(0 != (needs_backwards_lcmed & (Alignment-1)))
0186          //while(0 != (needs_backwards_lcmed & (Alignment-1)))
0187             needs_backwards_lcmed += backwards_multiple;
0188          BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
0189          lcm_out = lcm_val;
0190          needs_backwards_lcmed_out = needs_backwards_lcmed;
0191          return true;
0192       }
0193       //Check if it's a multiple of a quarter of the alignment
0194       else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
0195          size_type remainder;
0196          lcm_val = backwards_multiple*4u;
0197          current_forward = get_truncated_size(received_size, backwards_multiple);
0198          needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
0199          //while(0 != (needs_backwards_lcmed & (Alignment-1)))
0200             //needs_backwards_lcmed += backwards_multiple;
0201          if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
0202             if(backwards_multiple & Alignment/2u){
0203                needs_backwards_lcmed += (remainder)*backwards_multiple;
0204             }
0205             else{
0206                needs_backwards_lcmed += (4-remainder)*backwards_multiple;
0207             }
0208          }
0209          BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
0210          lcm_out = lcm_val;
0211          needs_backwards_lcmed_out = needs_backwards_lcmed;
0212          return true;
0213       }
0214       else{
0215          lcm_val = lcm(max, min);
0216       }
0217       //If maxbytes can't be achieved and we settle for a buffer between
0218       //minbytes and maxbytes, calculate the biggest of all
0219       //possibilities
0220       current_forward = get_truncated_size(received_size, backwards_multiple);
0221       needs_backwards = size_to_achieve - current_forward;
0222       BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
0223       needs_backwards_lcmed = get_rounded_size(needs_backwards, lcm_val);
0224       lcm_out = lcm_val;
0225       needs_backwards_lcmed_out = needs_backwards_lcmed;
0226       return true;
0227    }
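   //Illustrative trace (hypothetical values): assuming Alignment == 16, a call
   //with backwards_multiple == 4, received_size == 40 and size_to_achieve == 128
   //takes the power-of-two branch: lcm_out == 16, current_forward == 40,
   //needs_backwards == 88 and needs_backwards_lcmed_out == 96 (88 rounded up
   //to the next multiple of 16).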
0228 
0229    static void allocate_many
0230       ( MemoryAlgorithm *memory_algo
0231       , const size_type *elem_sizes
0232       , size_type n_elements
0233       , size_type sizeof_element
0234       , multiallocation_chain &chain)
0235    {
0236       this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element, chain);
0237    }
0238 
0239    static void* allocate_aligned
0240       (MemoryAlgorithm *memory_algo, size_type nbytes, size_type alignment)
0241    {
0242 
0243       //Ensure power of 2
0244       if ((alignment & (alignment - size_type(1u))) != 0){
0245          //Alignment is not a power of two
0246          BOOST_ASSERT((alignment & (alignment - size_type(1u))) == 0);
0247          return 0;
0248       }
0249 
0250       size_type real_size = nbytes;
0251       if(alignment <= Alignment){
0252          void *ignore_reuse = 0;
0253          return memory_algo->priv_allocate
0254             (boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
0255       }
0256 
0257       if(nbytes > UsableByPreviousChunk)
0258          nbytes -= UsableByPreviousChunk;
0259 
0260       //We can find an aligned portion if we allocate a block of
0261       //nbytes + alignment bytes or more.
0262       size_type minimum_allocation = max_value
0263          (nbytes + alignment, size_type(MinBlockUnits*Alignment));
0264       //Since we will split that block, we must request a bit more memory
0265       //if the alignment is near the beginning of the buffer, because otherwise,
0266       //there is no space for a new block before the alignment.
0267       //
0268       //            ____ Aligned here
0269       //           |
0270       //  -----------------------------------------------------
0271       // | MBU |
0272       //  -----------------------------------------------------
0273       size_type request =
0274          minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
0275          //prevsize - UsableByPreviousChunk
0276          );
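      //Illustrative arithmetic (hypothetical values): with Alignment == 16,
      //MinBlockUnits == 4 and AllocatedCtrlBytes == 16, a request of
      //nbytes == 100 (after the UsableByPreviousChunk adjustment above) and
      //alignment == 64 gives minimum_allocation == max(100 + 64, 64) == 164
      //and request == 164 + (2*4*16 - 16) == 276.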
0277 
0278       //Now allocate the buffer
0279       real_size = request;
0280       void *ignore_reuse = 0;
0281       void *buffer = memory_algo->priv_allocate(boost::interprocess::allocate_new, request, real_size, ignore_reuse);
0282       if(!buffer){
0283          return 0;
0284       }
0285       else if ((((std::size_t)(buffer)) % alignment) == 0){
0286          //If we are lucky and the buffer is already aligned, just split it,
0287          //free the unused tail and return the front (aligned) part
0288          block_ctrl *first  = memory_algo->priv_get_block(buffer);
0289          size_type old_size = first->m_size;
0290          const size_type first_min_units =
0291             max_value(ceil_units(nbytes) + AllocatedCtrlUnits, size_type(MinBlockUnits));
0292          //We can create a new block in the end of the segment
0293          if(old_size >= (first_min_units + MinBlockUnits)){
0294             block_ctrl *second =  move_detail::force_ptr<block_ctrl*>
0295                (reinterpret_cast<char*>(first) + Alignment*first_min_units);
0296             first->m_size  = first_min_units & block_ctrl::size_mask;
0297             second->m_size = (old_size - first->m_size) & block_ctrl::size_mask;
0298             BOOST_ASSERT(second->m_size >= MinBlockUnits);
0299             memory_algo->priv_mark_new_allocated_block(first);
0300             memory_algo->priv_mark_new_allocated_block(second);
0301             memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(second));
0302          }
0303          return buffer;
0304       }
0305 
0306       //Buffer not aligned, find the aligned part.
0307       //
0308       //                    ____ Aligned here
0309       //                   |
0310       //  -----------------------------------------------------
0311       // | MBU +more | ACB |
0312       //  -----------------------------------------------------
0313       char *pos = reinterpret_cast<char*>
0314          (reinterpret_cast<std::size_t>(static_cast<char*>(buffer) +
0315             //This is the minimum size of (2)
0316             (MinBlockUnits*Alignment - AllocatedCtrlBytes) +
0317             //This is the next MBU for the aligned memory
0318             AllocatedCtrlBytes +
0319             //This is the alignment trick
0320             alignment - 1) & -alignment);
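      //The expression "(x + alignment - 1) & -alignment" rounds x up to the
      //next multiple of alignment (a power of two); e.g. with alignment == 64,
      //164 (0xA4) is rounded up to 192 (0xC0).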
0321 
0322       //Now obtain the address of the blocks
0323       block_ctrl *first  = memory_algo->priv_get_block(buffer);
0324       block_ctrl *second = memory_algo->priv_get_block(pos);
0325       BOOST_ASSERT(pos <= (reinterpret_cast<char*>(first) + first->m_size*Alignment));
0326       BOOST_ASSERT(first->m_size >= 2*MinBlockUnits);
0327       BOOST_ASSERT((pos + MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment) <=
0328              (reinterpret_cast<char*>(first) + first->m_size*Alignment));
0329       //Set the new size of the first block
0330       size_type old_size = first->m_size;
0331       first->m_size = size_type(size_type(reinterpret_cast<char*>(second) - reinterpret_cast<char*>(first))/Alignment
0332                         & block_ctrl::size_mask);
0333       memory_algo->priv_mark_new_allocated_block(first);
0334 
0335       //Now check if we can create a new buffer in the end
0336       //
0337       //              __"second" block
0338       //             |      __Aligned here
0339       //             |     |      __"third" block
0340       //  -----------|-----|-----|------------------------------
0341       // | MBU +more | ACB | (3) | BCU |
0342       //  -----------------------------------------------------
0343       //This size will be the minimum size to be able to create a
0344       //new block in the end.
0345       const size_type second_min_units = max_value(size_type(MinBlockUnits),
0346                         ceil_units(nbytes) + AllocatedCtrlUnits );
0347 
0348       //Check if we can create a new block (of at least MinBlockUnits) at the end of the segment
0349       if((old_size - first->m_size) >= (second_min_units + MinBlockUnits)){
0350          //Now obtain the address of the end block
0351          block_ctrl *third = new (reinterpret_cast<char*>(second) + Alignment*second_min_units)block_ctrl;
0352          second->m_size = second_min_units & block_ctrl::size_mask;
0353          third->m_size  = (old_size - first->m_size - second->m_size) & block_ctrl::size_mask;
0354          BOOST_ASSERT(third->m_size >= MinBlockUnits);
0355          memory_algo->priv_mark_new_allocated_block(second);
0356          memory_algo->priv_mark_new_allocated_block(third);
0357          memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(third));
0358       }
0359       else{
0360          second->m_size = (old_size - first->m_size) & block_ctrl::size_mask;
0361          BOOST_ASSERT(second->m_size >= MinBlockUnits);
0362          memory_algo->priv_mark_new_allocated_block(second);
0363       }
0364 
0365       memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(first));
0366       return memory_algo->priv_get_user_buffer(second);
0367    }
0368 
0369    static bool try_shrink
0370       (MemoryAlgorithm *memory_algo, void *ptr
0371       ,const size_type max_size, size_type &received_size)
0372    {
0373       size_type const preferred_size = received_size;
0374       (void)memory_algo;
0375       //Obtain the real block
0376       block_ctrl *block = memory_algo->priv_get_block(ptr);
0377       size_type old_block_units = (size_type)block->m_size;
0378 
0379       //The block must be marked as allocated
0380       BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));
0381 
0382       //Check if alignment and block size are right
0383       assert_alignment(ptr);
0384 
0385       //Set this to a safe value
0386       received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
0387 
0388       //Now translate it to Alignment units
0389       const size_type max_user_units       = floor_units(max_size - UsableByPreviousChunk);
0390       const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
0391 
0392       //Check that the rounded max size is not smaller than the rounded preferred size
0393       if(max_user_units < preferred_user_units)
0394          return false;
0395 
0396       //Check if the block is smaller than the requested minimum
0397       size_type old_user_units = old_block_units - AllocatedCtrlUnits;
0398 
0399       if(old_user_units < preferred_user_units)
0400          return false;
0401 
0402       //If the block is exactly the preferred size, there is nothing to shrink
0403       if(old_user_units == preferred_user_units)
0404          return true;
0405 
0406       size_type shrunk_user_units =
0407          ((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
0408          ? (BlockCtrlUnits - AllocatedCtrlUnits)
0409          : preferred_user_units;
0410 
0411       //Some parameter checks
0412       if(max_user_units < shrunk_user_units)
0413          return false;
0414 
0415       //We must be able to create at least a new empty block
0416       if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
0417          return false;
0418       }
0419 
0420       //Update new size
0421       received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
0422       return true;
0423    }
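   //Note: try_shrink only validates the request and computes the shrunk size
   //(written to received_size); it does not modify the block. The actual split
   //of the remaining tail into a new free block is performed by shrink() below.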
0424 
0425    static bool shrink
0426       (MemoryAlgorithm *memory_algo, void *ptr
0427       ,const size_type max_size, size_type &received_size)
0428    {
0429       size_type const preferred_size = received_size;
0430       //Obtain the real block
0431       block_ctrl *block = memory_algo->priv_get_block(ptr);
0432       size_type old_block_units = (size_type)block->m_size;
0433 
0434       if(!try_shrink(memory_algo, ptr, max_size, received_size)){
0435          return false;
0436       }
0437 
0438       //Check if the old size was just the shrunk size (no splitting)
0439       if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
0440          return true;
0441 
0442       //Now we can just rewrite the size of the old buffer
0443       block->m_size = ((received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits) & block_ctrl::size_mask;
0444       BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
0445 
0446       //We create the new block
0447       block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
0448                   (reinterpret_cast<char*>(block) + block->m_size*Alignment);
0449       //Write control data to simulate that this new block was previously
0450       //allocated, and deallocate it
0451       new_block->m_size = (old_block_units - block->m_size) & block_ctrl::size_mask;
0452       BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
0453       memory_algo->priv_mark_new_allocated_block(block);
0454       memory_algo->priv_mark_new_allocated_block(new_block);
0455       memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
0456       return true;
0457    }
0458 
0459    private:
0460    static void priv_allocate_many
0461       ( MemoryAlgorithm *memory_algo
0462       , const size_type *elem_sizes
0463       , size_type n_elements
0464       , size_type sizeof_element
0465       , multiallocation_chain &chain)
0466    {
0467       //Note: sizeof_element == 0 indicates that we want to
0468       //allocate n_elements of the same size "*elem_sizes"
0469 
0470       //Calculate the total size of all requests
0471       size_type total_request_units = 0;
0472       size_type elem_units = 0;
0473       const size_type ptr_size_units = memory_algo->priv_get_total_units(sizeof(void_pointer));
0474       if(!sizeof_element){
0475          elem_units = memory_algo->priv_get_total_units(*elem_sizes);
0476          elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
0477          total_request_units = n_elements*elem_units;
0478       }
0479       else{
0480          for(size_type i = 0; i < n_elements; ++i){
0481             if(multiplication_overflows(elem_sizes[i], sizeof_element)){
0482                total_request_units = 0;
0483                break;
0484             }
0485             elem_units = memory_algo->priv_get_total_units(elem_sizes[i]*sizeof_element);
0486             elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
0487             if(sum_overflows(total_request_units, elem_units)){
0488                total_request_units = 0;
0489                break;
0490             }
0491             total_request_units += elem_units;
0492          }
0493       }
0494 
0495       if(total_request_units && !multiplication_overflows(total_request_units, Alignment)){
0496          size_type low_idx = 0;
0497          while(low_idx < n_elements){
0498             size_type total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
0499             size_type min_allocation = (!sizeof_element)
0500                ?  elem_units
0501                :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
0502             min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
0503 
0504             size_type received_size = total_bytes;
0505             void *ignore_reuse = 0;
0506             void *ret = memory_algo->priv_allocate
0507                (boost::interprocess::allocate_new, min_allocation, received_size, ignore_reuse);
0508             if(!ret){
0509                break;
0510             }
0511 
0512             block_ctrl *block = memory_algo->priv_get_block(ret);
0513             size_type received_units = (size_type)block->m_size;
0514             char *block_address = reinterpret_cast<char*>(block);
0515 
0516             size_type total_used_units = 0;
0517             while(total_used_units < received_units){
0518                if(sizeof_element){
0519                   elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
0520                   elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
0521                }
0522                if(total_used_units + elem_units > received_units)
0523                   break;
0524                total_request_units -= elem_units;
0525                //This is the position where the new block must be created
0526                block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>(block_address);
0527                assert_alignment(new_block);
0528 
0529                //The last block should take all the remaining space
0530                if((low_idx + 1) == n_elements ||
0531                   (total_used_units + elem_units +
0532                   ((!sizeof_element)
0533                      ? elem_units
0534                : max_value(memory_algo->priv_get_total_units(elem_sizes[low_idx+1]*sizeof_element), ptr_size_units))
0535                    > received_units)){
0536                   //By default, the new block will use the rest of the buffer
0537                   new_block->m_size = (received_units - total_used_units) & block_ctrl::size_mask;
0538                   memory_algo->priv_mark_new_allocated_block(new_block);
0539 
0540                   //If the remaining units are bigger than needed and we can
0541                   //split them, obtaining a new free memory block, do it.
0542                   if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
0543                      size_type shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
0544                      size_type shrunk_received = shrunk_request;
0545                      bool shrink_ok = shrink
0546                            (memory_algo
0547                            ,memory_algo->priv_get_user_buffer(new_block)
0548                            ,shrunk_request
0549                            ,shrunk_received);
0550                      (void)shrink_ok;
0551                      //Shrink must always succeed with passed parameters
0552                      BOOST_ASSERT(shrink_ok);
0553                      //Some sanity checks
0554                      BOOST_ASSERT(shrunk_request == shrunk_received);
0555                      BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
0556                      //"new_block->m_size" must have been reduced to elem_units by "shrink"
0557                      BOOST_ASSERT(new_block->m_size == elem_units);
0558                      //Now update the total received units with the reduction
0559                      received_units = elem_units + total_used_units;
0560                   }
0561                }
0562                else{
0563                   new_block->m_size = elem_units & block_ctrl::size_mask;
0564                   memory_algo->priv_mark_new_allocated_block(new_block);
0565                }
0566 
0567                block_address += new_block->m_size*Alignment;
0568                total_used_units += (size_type)new_block->m_size;
0569                //Check we have enough room to overwrite the intrusive pointer
0570                BOOST_ASSERT((new_block->m_size*Alignment - AllocatedCtrlUnits) >= sizeof(void_pointer));
0571                void_pointer p = ::new(memory_algo->priv_get_user_buffer(new_block), boost_container_new_t())void_pointer(0);
0572                chain.push_back(p);
0573                ++low_idx;
0574             }
0575             //Sanity check
0576             BOOST_ASSERT(total_used_units == received_units);
0577          }
0578 
0579          if(low_idx != n_elements){
0580             priv_deallocate_many(memory_algo, chain);
0581          }
0582       }
0583    }
0584 
0585    static void priv_deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
0586    {
0587       while(!chain.empty()){
0588          memory_algo->priv_deallocate(to_raw_pointer(chain.pop_front()));
0589       }
0590    }
0591 };
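
//Illustrative usage sketch (not from the original header). "MyAlgorithm" is a
//hypothetical memory algorithm that models the interface assumed above
//(nested types, constants and priv_* operations); a concrete algorithm, such
//as one modelled after rbtree_best_fit, would typically delegate its aligned
//and multi-element allocations to memory_algorithm_common roughly like this.
//The templates are never instantiated here, so they only document the
//intended call pattern.

template<class MyAlgorithm>
void *sample_allocate_aligned(MyAlgorithm &algo,
                              typename MyAlgorithm::size_type nbytes,
                              typename MyAlgorithm::size_type alignment)
{
   //Delegate to the shared implementation; alignment must be a power of two,
   //otherwise allocate_aligned asserts and returns 0.
   typedef memory_algorithm_common<MyAlgorithm> common_t;
   return common_t::allocate_aligned(&algo, nbytes, alignment);
}

template<class MyAlgorithm>
void sample_allocate_and_release_many(MyAlgorithm &algo,
                                      typename MyAlgorithm::size_type elem_bytes,
                                      typename MyAlgorithm::size_type n_elements)
{
   typedef memory_algorithm_common<MyAlgorithm>          common_t;
   typedef typename MyAlgorithm::multiallocation_chain   chain_t;

   //Allocate n_elements buffers of elem_bytes each, linked into "chain".
   //If not every element can be allocated, priv_allocate_many above releases
   //the chain's contents again, leaving it empty.
   chain_t chain;
   common_t::allocate_many(&algo, elem_bytes, n_elements, chain);

   //...individual buffers would be obtained here with chain.pop_front()...

   //Return every remaining buffer to the algorithm in one call.
   common_t::deallocate_many(&algo, chain);
}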
0592 
0593 }  //namespace ipcdetail {
0594 }  //namespace interprocess {
0595 }  //namespace boost {
0596 
0597 #include <boost/interprocess/detail/config_end.hpp>
0598 
0599 #endif   //#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP