//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>

#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>

#include <boost/container/detail/multiallocation_chain.hpp>

#include <boost/container/detail/placement_new.hpp>

#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
#include <boost/move/detail/force_ptr.hpp>   //force_ptr

#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>

#include <boost/assert.hpp>
#include <boost/static_assert.hpp>

#include <climits>
#include <cstring>

//!\file
//!Describes a best-fit algorithm based in an intrusive red-black tree used to allocate
//!objects in shared memory. This class is intended as a base class for single segment
//!and multi-segment implementations.

namespace boost {
namespace interprocess {

//!This class implements an algorithm that stores the free nodes in a red-black tree
//!to speed up searches. Experimental.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type         block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type               char_ptr;

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:

   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type        TreeHook;

   struct SizeHolder
   {
      static const size_type size_mask = size_type(-1) >> 2;
      //!Size of the previous block, in Alignment units
      size_type m_prev_size;
      //!This block's size (including the control header), in Alignment units
      size_type m_size           :  sizeof(size_type)*CHAR_BIT - 2;
      //!True if the previous block is allocated
      size_type m_prev_allocated :  1;
      //!True if this block is allocated
      size_type m_allocated      :  1;
   };
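
   //Layout note: m_size, m_prev_allocated and m_allocated pack into a single
   //size_type: the two allocation flags occupy the top two bits, which is why
   //sizes are kept in Alignment units and masked with size_mask
   //(size_type(-1) >> 2) before being stored.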

   //!Block control structure
   struct block_ctrl
      :  public SizeHolder, public TreeHook
   {
      block_ctrl()
      {  this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0;  }

      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size < b.m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size == b.m_size;  }
   };

   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };

   //!Shared mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type                    mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type             Imultiset;

   typedef typename Imultiset::iterator                        imultiset_iterator;
   typedef typename Imultiset::const_iterator                  imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null_mutex
   struct header_t : public mutex_type
   {
      Imultiset            m_imultiset;

      //!The extra size required by the segment
      size_type            m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type            m_allocated;
      //!The size of the memory segment
      size_type            m_size;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment and
   //!"extra_hdr_bytes" indicates the extra bytes, beginning after the
   //!sizeof(rbtree_best_fit) offset, that the allocator should not use at all.
   rbtree_best_fit(size_type size, size_type extra_hdr_bytes);

   //!Destructor
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size(size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is not more memory
   void* allocate(size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different sizes
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void deallocate(void *addr);

   //!Returns the size of the memory segment
   size_type get_size() const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory() const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory in
   //!extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true if success
   bool check_sanity();

   template<class T>
   T * allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                          size_type &prefer_in_recvd_out_size, T *&reuse);

   void * raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
                                 size_type &prefer_in_recvd_out_size,
                                 void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated pointed by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is not more memory.
   //!Alignment must be power of 2
   void* allocate_aligned(size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                                 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);

   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units occupied by a user buffer of
   //!"userbytes" bytes, including the control header
   static size_type priv_get_total_units(size_type userbytes);

   //!Tries to expand the block in place to at least min_size; reports the
   //!resulting size in prefer_in_recvd_out_size
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Tries to expand a block merging the preceding free block (backwards)
   //!and/or the following free block (forwards)
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Get the end block of the segment from the first block
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Get the first block of the segment from the end block
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Returns the previous block of the given block
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Returns the next block of the given block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Checks if a block is marked as allocated
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks a new block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr);  }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if the block has enough units and splits/unlinks it,
   //!returning the address given to the user
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);

   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::dtl::alignment_of
                  < ::boost::container::dtl::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask         = (Alignment - 1);
   static const size_type BlockCtrlBytes        = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits        = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes    = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits    = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes     = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits     = EndCtrlBlockBytes/Alignment;
   static const size_type MinBlockUnits         = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the maximum alignment is power of two
   BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   public:
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};
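
//Usage sketch (illustrative comment only; the typedef below mirrors the public
//Boost.Interprocess API, where rbtree_best_fit is the default memory algorithm
//of managed_shared_memory):
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//   namespace bip = boost::interprocess;
//
//   typedef bip::basic_managed_shared_memory
//      < char
//      , bip::rbtree_best_fit<bip::mutex_family>
//      , bip::iset_index
//      > managed_shm_t;
//
//   //managed_shm_t shm(bip::create_only, "shm_name", 65536);
//   //void *p = shm.allocate(1024);
//   //shm.deallocate(p);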

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
{
   size_type uint_this            = (std::size_t)this_ptr;
   size_type main_hdr_end         = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
   size_type block1_off           = aligned_main_hdr_end - uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}
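
//Segment layout built by priv_add_segment (illustrative):
//
//   | rbtree_best_fit header | extra_hdr_bytes | first (free) block ... | end SizeHolder |
//
//The end marker is a bare SizeHolder flagged as allocated, so coalescing in
//priv_deallocate can never run past the end of the segment.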

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, size_type segment_size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block, covering the whole segment except the end marker
   block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
   first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
   BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);

   //Build the end marker (a bare SizeHolder) right past the first block
   SizeHolder *end_block =
      ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;

   //This will write the m_prev_size/m_prev_allocated part of the end marker
   priv_mark_as_free_block(first_big_block);
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   first_big_block->m_prev_size = end_block->m_size =
      size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment & block_ctrl::size_mask;
   #else
   first_big_block->m_prev_size = end_block->m_size =
      size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
   #endif
   //The end marker is "allocated" so the first block can never merge past it
   end_block->m_allocated = 1;
   first_big_block->m_prev_allocated = 1;

   BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
   BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
   BOOST_ASSERT(priv_first_block() == first_big_block);
   BOOST_ASSERT(priv_end_block()   == end_block);

   //Some checks to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the sizes of the header are placed before the rbtree hook
   BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
                < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));

   //Insert it in the intrusive container
   m_header.m_imultiset.insert(*first_big_block);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block()
{
   const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_end_block()
{
   const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
   block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated = 0;
   m_header.m_size      = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //Nothing to do here: the segment memory is owned and released by the caller
}

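//grow() assumes the caller has already extended the underlying memory mapping
//by extra_size bytes. It moves the end marker to the new border, turns the old
//end marker into an allocated block covering the added space, and finally
//deallocates that block so it coalesces with a trailing free block, if any.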
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first and end blocks
   block_ctrl *first_block   = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                             reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //Write the new end marker: its m_size stores the distance to the
   //first block, and it's always marked as allocated
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = size_type(reinterpret_cast<char*>(first_block) -
                                     reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   #else
   new_end_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                                     reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block becomes a new allocated block covering the added space
   block_ctrl *new_block = old_end_block;
   new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                                 reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block so it merges with a trailing free block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //Get the address of the old end block
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and end blocks
   if(priv_next_block(first_block) == old_end_block){
      //If so, allocate a zero-sized buffer from the first bytes of the
      //first block, so the rest of that block can be trimmed
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If allocation failed, there is nothing we can do
      if(!unique_buffer)
         return;

      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If the last block is allocated, there is nothing to shrink
      if(priv_is_prev_allocated(old_end_block))
         return;
      //Otherwise, the free block preceding the end marker will be trimmed
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase the free block from the free-block tree
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                                reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write the new end marker: its m_size stores the distance to the first block
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #else
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
{  return m_header.m_size;  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size(size_type extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           MinBlockUnits + EndCtrlBlockUnits)*Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //All memory is deallocated when nothing is accounted as allocated and the
   //only free node in the tree is the initial block covering the whole segment
   return m_header.m_allocated == 0 &&
          m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
          (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end() &&
          m_header.m_imultiset.begin()->m_size ==
             (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   size_type free_memory = 0;

   //Iterate through all free blocks obtained from the tree
   for(; ib != ie; ++ib){
      free_memory += (size_type)ib->m_size*Alignment;
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check that allocated bytes don't exceed the segment size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Check that free bytes don't exceed the usable segment size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(size_type nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type ignore_recvd = nbytes;
   void *ignore_reuse = 0;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                      size_type &prefer_in_recvd_out_size, T *&reuse)
{
   void* raw_reuse = reuse;
   void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   reuse = static_cast<T*>(raw_reuse);
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
   return static_cast<T*>(ret);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_objects,
                          size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_objects;
   if(!sizeof_object)
      return reuse_ptr = 0, static_cast<void*>(0);
   if(command & boost::interprocess::try_shrink_in_place){
      if(!reuse_ptr)   return static_cast<void*>(0);
      const bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
      prefer_in_recvd_out_objects /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
   }
}
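//priv_allocation_command works in "objects" of sizeof_object bytes: the
//limit/preferred counts are validated against m_header.m_size/sizeof_object
//first (so the byte conversions below cannot overflow) and the received size
//is converted back to object units before returning.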
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                           size_type &prefer_in_recvd_out_size,
                           void *&reuse_ptr, size_type sizeof_object)
{
   void* ret;
   size_type const preferred_size = prefer_in_recvd_out_size;
   size_type const max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type p_size = preferred_size*sizeof_object;
   size_type r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //We need no synchronization since this block's size is not going
   //to be modified by anyone else.
   //Obtain the real size of the block
   return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all free blocks obtained from the tree
   while(ib != ie){
      //Just clear the memory part reserved for the user. The volatile
      //qualifier keeps the compiler from optimizing the clearing loop away
      //(a plain memset here has been elided by some compilers).
      volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
      size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
      while(s--){
         *ptr++ = 0;
      }
      ++ib;
   }
}
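//priv_expand_both_sides first tries to grow the buffer in place (forwards);
//if more room is still needed and expand_bwd was requested, it merges the
//preceding free block, moving the user data start backwards while keeping the
//distance to the original buffer a multiple of backwards_multiple.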
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real block of the buffer
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is allocated, there is nothing to merge backwards
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //The previous block's size must match the boundary tag
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      //Let's calculate the number of extra bytes of data before the current
      //block's begin. The value must be a multiple of backwards_multiple
      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if the previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all the forward space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Write the new (backwards expanded) block's control data
            new_block->m_size =
               (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            //Shrink the previous free block
            prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
                                           reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree:
            //if the new size still fulfills the tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole previous block can be merged
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase the free block from the tree, it's going to be fully used
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block:
            //prev_block->m_size*Alignment is a multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, chain);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
                ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr)   return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units of the limit (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);

   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      //Best fit: the first free block that holds the preferred size
      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      //Otherwise, settle for the largest free block if it satisfies the limit
      if(it != m_header.m_imultiset.begin() &&
         (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }

   //Now try to expand both sides with the min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
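//The user buffer starts AllocatedCtrlBytes past the block_ctrl header, so the
//two conversions below are plain pointer offsets:
//
//   [ block_ctrl ........ | user buffer ................................ ]
//   ^                     ^
//   priv_get_block(ptr)   priv_get_user_buffer(block) == ptr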
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (move_detail::force_ptr<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(size_type userbytes)
{
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}
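
//Worked example for priv_get_total_units (a sketch assuming a 64-bit build
//with Alignment == 16 and sizeof(size_type) == 8, so UsableByPreviousChunk == 8
//and AllocatedCtrlUnits == 1): a request of 100 user bytes rounds (100 - 8)
//up to 96 bytes == 6 units, plus 1 control unit == 7 units total, i.e. 112
//bytes of segment space for a 100-byte allocation.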

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real block of the buffer
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;

   //The block must be marked as allocated
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;

   //Now translate the requested sizes to Alignment units
   const size_type min_user_units       = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   //If the next block is allocated, we can't expand forward
   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);

   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;

   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;

   if(merged_user_units < min_user_units){
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //These are the total units of the merged block (supposing the user
   //requests intended_user_units)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;

   //Check if we can split the merged block in two parts
   if((merged_units - intended_units) >= BlockCtrlUnits){
      //This block is bigger than needed, split it: the first part is merged
      //into "block" and the second part keeps the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;

      //Erase the old free block from the tree and create a new one
      //with the remaining space
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      m_header.m_imultiset.erase(old_next_block_it);

      //Write the new block control data
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
      rem_block->m_size = rem_units & block_ctrl::size_mask;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      m_header.m_imultiset.insert(*rem_block);

      //Write the new block size
      block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Erase the old free block from the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the new block size
      block->m_size = merged_units & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}
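//The navigation helpers below implement a boundary-tag scheme: every block
//knows its own size in Alignment units, and whenever a block is freed its
//size is mirrored into the next block's m_prev_size, so both neighbours are
//reachable in O(1) without searching the tree.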
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   //m_prev_size is only kept up to date while the previous block
   //is free, hence the assertion
   BOOST_ASSERT(!ptr->m_prev_allocated);
   return move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
}

//Returns the end block of the segment from its first block
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
{
   //The first block's m_prev_size stores the distance to the end marker
   //and its m_prev_allocated bit is always set
   BOOST_ASSERT(first_segment_block->m_prev_allocated);
   block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
   (void)end_block;
   BOOST_ASSERT(end_block->m_allocated == 1);
   BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
   BOOST_ASSERT(end_block > first_segment_block);
   return end_block;
}

//Returns the first block of the segment from its end block
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
{
   //The end marker's m_size stores the distance to the first block
   //and its m_allocated bit is always set
   BOOST_ASSERT(end_segment_block->m_allocated);
   block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
   (void)first_block;
   BOOST_ASSERT(first_block->m_prev_allocated == 1);
   BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
   BOOST_ASSERT(end_segment_block > first_block);
   return first_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   return move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   bool allocated = block->m_allocated != 0;
   #ifndef NDEBUG
   //In debug mode, check that the flag is consistent with the
   //m_prev_allocated bit stored in the next block
   if(block != priv_end_block()){
      block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
      (void)next_block_prev_allocated;
      BOOST_ASSERT(allocated == next_block_prev_allocated);
   }
   #endif
   return allocated;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   if(block->m_prev_allocated){
      return true;
   }
   else{
      #ifndef NDEBUG
      //In debug mode, check consistency against the previous block
      if(block != priv_first_block()){
         block_ctrl *prev = priv_prev_block(block);
         (void)prev;
         BOOST_ASSERT(!prev->m_allocated);
         BOOST_ASSERT(prev->m_size == block->m_prev_size);
      }
      #endif
      return false;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 1;
   move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   block_ctrl *next_block = priv_next_block(block);
   next_block->m_prev_allocated = 0;
   next_block->m_prev_size = block->m_size;
}
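//priv_check_and_allocate hands out a free block found in the tree: when the
//block exceeds the request by at least one whole minimum block
//(BlockCtrlUnits), it is split and the remainder is reinserted into the tree;
//otherwise the entire block is consumed.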
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it: the first part keeps
      //"nunits" and the second part keeps the remaining space
      size_type block_old_size = block->m_size;
      block->m_size = nunits & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //Build the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Replace the old block with the remainder in the tree: erase the old
      //one and insert the new one using the erase position as a hint
      m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
   }
   else if (block->m_size >= nunits){
      //The block fits without leaving room for a remainder: use it whole
      m_header.m_imultiset.erase(it_old);
   }
   else{
      //A block too small for the request was passed in: broken invariant
      BOOST_ASSERT(0);
      return 0;
   }

   //Update the allocated byte count and report the usable size
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);
   char *ptr = reinterpret_cast<char*>(block) + tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   //m_prev_size is only meaningful for free blocks: zero it in the next block
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}
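//priv_deallocate returns a block to the free tree, coalescing it with the
//previous and/or next block when either one is free, so the tree never holds
//two adjacent free blocks.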
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block and check which neighbours are free
   block_ctrl *const next_block = priv_next_block(block);
   const bool merge_with_prev   = !priv_is_prev_allocated(block);
   const bool merge_with_next   = !priv_is_allocated_block(next_block);

   //Merge logic: first just update block sizes, then fix the free block tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous block is free
      if(merge_with_prev){
         //Get the previous block
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
      }
      //Merge if the next block is free
      if(merge_with_next){
         block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         m_header.m_imultiset.erase(next_it);
      }
   }
   priv_mark_as_free_block(block_to_insert);
   m_header.m_imultiset.insert(*block_to_insert);
}

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP