File indexing completed on 2026-05-11 08:24:34
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
0012 #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
0013
0014 #ifndef BOOST_CONFIG_HPP
0015 # include <boost/config.hpp>
0016 #endif
#

0019 #if defined(BOOST_HAS_PRAGMA_ONCE)
0020 # pragma once
0021 #endif
0022
0023 #include <boost/interprocess/detail/config_begin.hpp>
0024 #include <boost/interprocess/detail/workaround.hpp>
0025
0026
0027 #include <boost/interprocess/containers/allocation_type.hpp>
0028 #include <boost/interprocess/exceptions.hpp>
0029 #include <boost/interprocess/interprocess_fwd.hpp>
0030 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
0031 #include <boost/interprocess/offset_ptr.hpp>
0032 #include <boost/interprocess/sync/scoped_lock.hpp>
0033
0034 #include <boost/interprocess/detail/min_max.hpp>
0035 #include <boost/interprocess/detail/math_functions.hpp>
0036 #include <boost/interprocess/detail/type_traits.hpp>
0037 #include <boost/interprocess/detail/utilities.hpp>
0038
0039 #include <boost/container/detail/multiallocation_chain.hpp>
0040
0041 #include <boost/container/detail/placement_new.hpp>
0042
0043 #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
0044 #include <boost/move/detail/force_ptr.hpp> //make_unsigned, alignment_of
0045
0046 #include <boost/intrusive/pointer_traits.hpp>
0047 #include <boost/intrusive/set.hpp>
0048
0049 #include <boost/assert.hpp>
0050
0051 #include <climits>
0052 #include <cstring>
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064 namespace boost {
0065 namespace interprocess {
0066
0067
0068
//!This class implements an algorithm that stores the free nodes in a red-black
//!tree to have logarithmic search/insert times when looking for best-fit blocks.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-default-constructible and non-copyable
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type         block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type               char_ptr;

   #endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:

   //Intrusive set hook embedded in each free block so that it can be linked
   //in the size-ordered tree without any extra allocation
   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type TreeHook;

   struct SizeHolder
   {
      //Mask that clears the two flag bits embedded in the size field below
      static const size_type size_mask = size_type(-1) >> 2;

      //!Previous block's size, in Alignment units
      size_type m_prev_size;

      //!This block's size (including this header), in Alignment units.
      //!Two bits of the word are reserved for the allocation flags below.
      size_type m_size : sizeof(size_type)*CHAR_BIT - 2;
      size_type m_prev_allocated : 1;
      size_type m_allocated : 1;
   };

   //!Block control structure: the size header plus the intrusive tree hook
   //!used while the block is linked in the free-block multiset
   struct block_ctrl
      : public SizeHolder
      , public TreeHook
   {
      block_ctrl()
      {
         this->SizeHolder::m_size = 0;
         this->SizeHolder::m_allocated = 0;
         this->SizeHolder::m_prev_allocated = 0;
      }

      //Free blocks are ordered in the multiset by size
      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      { return a.SizeHolder::m_size < b.SizeHolder::m_size; }

      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      { return a.SizeHolder::m_size == b.SizeHolder::m_size; }
   };

   //Transparent comparator used to look up free blocks by raw size
   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      { return size < block.m_size; }

      bool operator()(const block_ctrl &block, size_type size) const
      { return block.m_size < size; }
   };

   //!Shared mutex type to protect segment-wide operations
   typedef typename MutexFamily::mutex_type mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;

   typedef typename Imultiset::iterator imultiset_iterator;
   typedef typename Imultiset::const_iterator imultiset_const_iterator;

   //!This struct includes the needed bookkeeping data and derives from
   //!mutex_type to allow the empty-base optimization when the mutex is stateless
   struct header_t : public mutex_type
   {
      //Tree of free blocks, ordered by size
      Imultiset m_imultiset;

      //!Extra bytes reserved after this object that the allocator must skip
      size_type m_extra_hdr_bytes;
      //!Bytes currently handed out to users (for internal checking)
      size_type m_allocated;
      //!Total size of the managed memory segment
      size_type m_size;
   } m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the number of extra bytes, beginning at the
   //!sizeof(rbtree_best_fit) offset, that the allocator must not use at all.
   rbtree_best_fit (size_type size, size_type extra_hdr_bytes);

   //!Destructor
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm for a given
   //!extra header size
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates nbytes bytes; returns 0 when there is no more memory
   void* allocate (size_type nbytes);

   //!Deallocates previously allocated bytes
   void deallocate(void* addr);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //!Multiple-element allocation, all elements of the same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple-element allocation, element sizes given per element
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple-element deallocation
   void deallocate_many(multiallocation_chain &chain);

   //!Typed allocation command; see raw_allocation_command for semantics
   template<class T>
   T* allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                         size_type& prefer_in_recvd_out_size, T*& reuse);

   //!Raw allocation command; limits and preferences expressed in objects
   //!of size sizeof_object
   void* raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
                         size_type& prefer_in_recvd_out_size,
                         void*& reuse_ptr, size_type sizeof_object = 1);

   #endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //!Returns the size of the memory segment
   size_type get_size() const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory() const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory in extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check and returns true on success
   bool check_sanity();

   //!Returns the size of the buffer previously allocated and pointed by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 when there is no more memory
   void* allocate_aligned (size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Obtains the offset of the first block relative to "this_ptr"
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   //!Returns a pointer to the first block of the segment
   block_ctrl *priv_first_block();

   //!Returns a pointer to the special "end" block of the segment
   block_ctrl *priv_end_block();

   //!Locked front-end of priv_allocate, working in sizeof_object units
   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                        size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);

   //!Real allocation algorithm with minimum-size option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user's buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the total number of units (Alignment-sized) that a user
   //!request of "userbytes" bytes really occupies, including headers
   static size_type priv_get_total_units(size_type userbytes);

   //!Tries to expand "ptr" in place; the obtained size is written to
   //!prefer_in_recvd_out_size
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand-to-both-sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the block previous to "ptr" is marked allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Returns a pointer to the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Returns a pointer to the first block from the "end" block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Returns a pointer to the block preceding "ptr"
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Returns a pointer to the block following "ptr"
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Returns true if the block is marked allocated
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks a freshly created block as allocated (delegates to
   //!priv_mark_as_allocated_block)
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   { return priv_mark_as_allocated_block(ptr); }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if the block can satisfy "units", splitting it if needed,
   //!and marks the result allocated
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   //Effective alignment: either the user-supplied MemAlignment, or the
   //platform's maximum alignment when MemAlignment is zero
   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::dtl::alignment_of
                  < ::boost::container::dtl::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to the two flag bits embedded in m_size, alignment must be at least 4
   BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= 4));
   //The alignment must be at least that of a pointer for the tree hooks
   BOOST_INTERPROCESS_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the alignment is a power of two
   BOOST_INTERPROCESS_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
   #endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   public:
   //Bookkeeping bytes paid per allocation (header minus the tail bytes
   //usable by the previous chunk)
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};
0375
0376 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
0377
0378 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0379 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
0380 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
0381 ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
0382 {
0383 size_type uint_this = (std::size_t)this_ptr;
0384 size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
0385 size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
0386 size_type block1_off = aligned_main_hdr_end - uint_this;
0387 algo_impl_t::assert_alignment(aligned_main_hdr_end);
0388 algo_impl_t::assert_alignment(uint_this + block1_off);
0389 return block1_off;
0390 }
0391
0392 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0393 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0394 priv_add_segment(void *addr, size_type segment_size)
0395 {
0396
0397 algo_impl_t::check_alignment(addr);
0398
0399 BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
0400
0401
0402 block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
0403 first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
0404 BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
0405
0406
0407 SizeHolder *end_block =
0408 ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;
0409
0410
0411 priv_mark_as_free_block (first_big_block);
0412 #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
0413 first_big_block->m_prev_size = end_block->m_size =
0414 size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment) & block_ctrl::size_mask;
0415 #else
0416 first_big_block->m_prev_size = end_block->m_size =
0417 size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
0418 #endif
0419 end_block->m_allocated = 1;
0420 first_big_block->m_prev_allocated = 1;
0421
0422 BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
0423 BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
0424 BOOST_ASSERT(priv_first_block() == first_big_block);
0425 BOOST_ASSERT(priv_end_block() == end_block);
0426
0427
0428
0429
0430
0431 BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
0432 < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
0433
0434 m_header.m_imultiset.insert(*first_big_block);
0435 }
0436
//!Returns a pointer to the first block of the segment, computed from this
//!object's address plus the stored extra header offset.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_first_block()
{
   const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
}
0445
//!Returns a pointer to the special "end" block of the segment, derived from
//!the stored segment size: it sits after the space that the original first
//!block covered (segment minus first-block offset minus end-control units).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_end_block()
{
   const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
   block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
   return end_block;
}
0457
//!Constructor: records the segment size and extra header bytes, then hands
//!the remaining memory (after this object and the extra header) to
//!priv_add_segment to create the initial free block.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated = 0;
   m_header.m_size = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Calculate the offset of the first block that will cover the rest of
   //the segment; the segment must be at least the algorithm's minimum size
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}
0473
//!Destructor. Intentionally empty: this object does not own the segment
//!memory, so there is nothing to release here.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //Nothing to do
}
0481
//!Expands the managed segment by extra_size bytes: writes a new "end" block
//!at the new border, turns the old end block into a regular allocated block
//!spanning the grown area, and immediately deallocates it so the space joins
//!the free tree (coalescing with a preceding free block if possible).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block and the old "end" block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
      reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least BlockCtrlBytes of new space to create a new block
   if((m_header.m_size - old_border_offset) < BlockCtrlBytes){
      return;
   }

   //Compute the position of the new "end" block (rounded down to Alignment)
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //The "end" block and the first block are special: the end block's m_size
   //and the first block's m_prev_size store the distance between both
   //(direction depends on the ABI version), and the end block is always
   //marked allocated
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = size_type(reinterpret_cast<char*>(first_block) -
      reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   #else
   new_end_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
      reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block becomes a regular block covering the grown area
   block_ctrl *new_block = old_end_block;
   new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
      reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   //Temporarily account it as allocated so the deallocation below balances
   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Deallocate the new block so it is inserted in the free tree
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
0532
//!Shrinks the managed segment as much as possible: drops the trailing free
//!block (allocating a temporary unique buffer first when the whole segment
//!is one free block) and rewrites the "end" marker at the new border.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //Get the address of the old "end" block
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;

   //If the segment is a single free block, we must allocate a minimal
   //buffer so that some block remains after shrinking
   if(priv_next_block(first_block) == old_end_block){

      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If even a zero-byte request fails, we can't shrink
      if(!unique_buffer)
         return;

      //The free remainder after the unique allocation becomes the last block
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If the block before the end marker is allocated, nothing can be cut
      if(priv_is_prev_allocated(old_end_block))
         return;

      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase the last free block from the tree, since it will disappear
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
      reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write the new "end" block attributes: its m_size and the first block's
   //m_prev_size store the distance between both (direction is ABI-dependent)
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #else
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size to the shrunk border
   m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   //Free the temporary unique buffer, if one was needed
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}
0606
//!Returns the total size of the managed memory segment.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
{ return m_header.m_size; }
0611
//!Returns the number of free bytes: total segment size minus allocated bytes
//!minus the header area before the first block. No lock is taken, so under
//!concurrent use the value is only a snapshot.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
}
0619
//!Returns the minimum segment size the algorithm needs for a given number of
//!extra header bytes: the rounded-up object header, the rounded-up extra
//!bytes, one block control and the end control block.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (size_type extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           BlockCtrlUnits + EndCtrlBlockUnits)*Alignment;
}
0629
//!Returns true if all allocated memory has been deallocated: no bytes are
//!accounted as allocated and the free tree holds exactly one block whose
//!size spans the whole usable segment.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Exactly one free block, spanning the segment minus header and end block
   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
       (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
       && m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}
0646
//!Performs an internal sanity check: every free block must be aligned, the
//!allocated byte count must not exceed the segment size, and the sum of free
//!block sizes must fit in the usable segment area. Returns true on success.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   size_type free_memory = 0;

   //Iterate through all free blocks, accumulating size and checking alignment
   for(; ib != ie; ++ib){
      free_memory += (size_type)ib->m_size*Alignment;
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Allocated bytes can never exceed the segment size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Free bytes can never exceed the usable area after the header
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}
0679
0680 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0681 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0682 allocate(size_type nbytes)
0683 {
0684
0685 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0686
0687 size_type ignore_recvd = nbytes;
0688 void *ignore_reuse = 0;
0689 return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
0690 }
0691
0692 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0693 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0694 allocate_aligned(size_type nbytes, size_type alignment)
0695 {
0696
0697 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0698
0699 return algo_impl_t::allocate_aligned(this, nbytes, alignment);
0700 }
0701
0702 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0703 template<class T>
0704 inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0705 allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
0706 size_type &prefer_in_recvd_out_size, T *&reuse)
0707 {
0708 void* raw_reuse = reuse;
0709 void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
0710 reuse = static_cast<T*>(raw_reuse);
0711 BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
0712 return static_cast<T*>(ret);
0713 }
0714
//!Raw allocation command: limits and preferences are expressed in objects of
//!size sizeof_object. Handles try_shrink_in_place here (converting counts to
//!bytes and back); all other commands are forwarded to priv_allocation_command.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
                        size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_objects;
   //Zero-sized objects can't be allocated
   if(!sizeof_object)
      return reuse_ptr = 0, static_cast<void*>(0);
   if(command & boost::interprocess::try_shrink_in_place){
      //Shrinking requires a buffer to shrink
      if(!reuse_ptr) return static_cast<void*>(0);
      //Convert object counts to bytes (writing the byte preference through
      //the reference via the assignment inside the call)
      const bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
      //Translate the received byte count back to objects
      prefer_in_recvd_out_objects /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
   }
}
0736
0737
//!Object-count front-end of priv_allocate: validates the counts against the
//!segment capacity, converts them to bytes, runs the allocation under the
//!segment lock, and converts the received byte count back to objects.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                       size_type &prefer_in_recvd_out_size,
                       void *&reuse_ptr, size_type sizeof_object)
{
   void* ret;
   size_type const preferred_size = prefer_in_recvd_out_size;
   size_type const max_count = m_header.m_size/sizeof_object;
   //Overflow guard: neither count may exceed the theoretical maximum number
   //of objects that fit in the segment
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type p_size = preferred_size*sizeof_object;
   size_type r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      //r_size is seeded with the preferred byte count and receives the
      //actually obtained byte count
      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}
0762
//!Returns the number of usable bytes of the buffer previously allocated and
//!pointed by ptr. No lock is taken — NOTE(review): this presumably relies on
//!the block header of an allocated buffer not being modified concurrently.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //Real block size minus the allocated-control units, plus the tail bytes
   //of this block usable by the caller (UsableByPreviousChunk)
   return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}
0773
//!Writes zeros over the user-visible part of every free block. This function
//!is normally used for security reasons before exposing or persisting the
//!segment contents.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all free blocks obtained from the tree
   while(ib != ie){
      //Clear only the memory past the block control data, which must stay
      //valid (the block remains linked in the tree). The volatile pointer
      //keeps the compiler from eliding the clearing loop.
      volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
      size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
      while(s--){
         *ptr++ = 0;
      }

      ++ib;
   }
}
0798
//!Tries to satisfy an expansion request reusing the buffer at reuse_ptr:
//!first forward (priv_expand) when expand_fwd is requested, then backwards
//!by taking space from the preceding free block (splitting it, or merging it
//!whole). Returns the — possibly moved — user pointer, or 0 on failure.
//!When only_preferred_backwards is true, backwards expansion targets the
//!preferred size instead of the minimum.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      //Try pure forward expansion first
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      //No forward expansion requested: succeed immediately if the current
      //buffer is already big enough
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   //Backwards expansion requires sizes that are multiples of backwards_multiple
   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real block of the buffer
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is allocated there is nothing to take from
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //Some more sanity checks on the recorded sizes
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      size_type needs_backwards_aligned;
      size_type lcm;
      //Compute how many aligned bytes must be taken backwards so that the
      //new start still honours backwards_multiple
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if the previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Take all the forward space too; with the backwards space already
         //guaranteed this expansion is expected to succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //Case 1: the previous block is big enough to be split, leaving a
         //free remainder of at least BlockCtrlUnits before the new block
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //The new allocated block covers the taken backwards bytes plus
            //the (already expanded) user bytes and its own header
            new_block->m_size =
               (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            //Shrink the previous block to the space left before new_block
            //and mark it free again
            prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
               reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //prev_block shrank, so its position in the size-ordered multiset
            //may be stale: reinsert it only when ordering was broken
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //The new user pointer must keep the backwards_multiple relation
            //with the old one
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Case 2: no room to split, but the whole previous block can be
         //merged while keeping the backwards_multiple invariant
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Remove the previous block from the tree, it stops being free
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Merge the whole previous block into the returned size
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;

            //Now update sizes: prev_block absorbs the reused block
            prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            //The new user pointer must keep the backwards_multiple relation
            //with the old one
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //The previous block can't be used while keeping alignment and
            //multiple constraints: fall through and fail
         }
      }
   }
   return 0;
}
0940
0941 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
0942 inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
0943 deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
0944 {
0945
0946 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
0947
0948 algo_impl_t::deallocate_many(this, chain);
0949 }
0950
//!Real allocation algorithm. Handles, in order: shrink_in_place on reuse_ptr;
//!in-place expansion (preferred size) when expand flags and reuse_ptr are
//!given; a best-fit lookup in the size-ordered multiset for allocate_new
//!(falling back to the largest block if it meets the limit); and finally
//!in-place expansion accepting the minimum size.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
                ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      //Shrinking requires a buffer to shrink
      if(!reuse_ptr) return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   //The minimum can't exceed the preference
   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units needed for the preferred size (including headers)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units needed for the limit size (including headers)
   size_type limit_units = priv_get_total_units(limit_size);

   //First try in-place expansion demanding the preferred size
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      //Best fit: smallest free block with at least preferred_units
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      //No block reaches the preference: fall back to the largest block
      //if it at least satisfies the limit
      if(it != m_header.m_imultiset.begin()&&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }

   //Last chance: in-place expansion accepting the minimum size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
1011
1012 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1013 inline
1014 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1015 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
1016 {
1017 return const_cast<block_ctrl*>
1018 (move_detail::force_ptr<const block_ctrl*>
1019 (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
1020 }
1021
1022 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1023 inline
1024 void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1025 priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1026 { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
1027
1028 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1029 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
1030 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1031 priv_get_total_units(size_type userbytes)
1032 {
1033 if(userbytes < UsableByPreviousChunk)
1034 userbytes = UsableByPreviousChunk;
1035 size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
1036 if(units < BlockCtrlUnits) units = BlockCtrlUnits;
1037 return units;
1038 }
1039
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   //Tries to expand the allocation at ptr forward by merging with the
   //next contiguous block when it is free. Succeeds if at least
   //min_size usable bytes result; prefer_in_recvd_out_size is in/out:
   //preferred bytes in, achieved (or achievable, on failure) bytes out.
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real block header and its current size
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;

   //The block must be marked as allocated
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Maybe the block already holds enough usable bytes
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;

   //Translate the requests into user units (Alignment-sized chunks)
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Sanity check on the request
   BOOST_ASSERT(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   //If the next block is allocated there is nothing to merge with
   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);

   //Total units if this block and the next free block are merged
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;

   //Units usable by the caller after merging
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;

   //Even the merged block is too small: report the achievable size and fail
   if(merged_user_units < min_user_units){
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Give the caller at most the preferred amount, capped by what's available
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //Total units of the expanded block (control data included)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;

   //If the leftover can hold a minimal free block, split instead of
   //absorbing the whole neighbour
   if((merged_units - intended_units) >= BlockCtrlUnits){

      //The next block's size must still be mirrored in its successor
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;

      //Remove the old free block from the size-ordered free set before
      //its header is overwritten; a new header is rebuilt at the split
      //point below and reinserted.
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      m_header.m_imultiset.erase(old_next_block_it);

      //Construct the remainder free block just past the intended units
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
      rem_block->m_size = rem_units & block_ctrl::size_mask;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      m_header.m_imultiset.insert(*rem_block);

      //Write the new length to the expanded block and account the growth
      block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //Not enough leftover for a free block: absorb the neighbour whole
   else{
      //Remove the absorbed block from the free set
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the merged length and account the growth
      block->m_size = merged_units & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}
1135
1136 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1137 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1138 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
1139 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1140 {
1141 BOOST_ASSERT(!ptr->m_prev_allocated);
1142 return move_detail::force_ptr<block_ctrl*>
1143 (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
1144 }
1145
1146
1147
1148 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1149 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1150 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
1151 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
1152 {
1153
1154
1155 BOOST_ASSERT(first_segment_block->m_prev_allocated);
1156 block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
1157 (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
1158 (void)end_block;
1159 BOOST_ASSERT(end_block->m_allocated == 1);
1160 BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
1161 BOOST_ASSERT(end_block > first_segment_block);
1162 return end_block;
1163 }
1164
1165 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1166 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1167 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
1168 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
1169 {
1170
1171
1172 BOOST_ASSERT(end_segment_block->m_allocated);
1173 block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
1174 (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
1175 (void)first_block;
1176 BOOST_ASSERT(first_block->m_prev_allocated == 1);
1177 BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
1178 BOOST_ASSERT(end_segment_block > first_block);
1179 return first_block;
1180 }
1181
1182
1183 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1184 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1185 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
1186 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1187 {
1188 return move_detail::force_ptr<block_ctrl*>
1189 (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
1190 }
1191
1192 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1193 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
1194 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1195 {
1196 bool allocated = block->m_allocated != 0;
1197 #ifndef NDEBUG
1198 if(block != priv_end_block()){
1199 block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
1200 (reinterpret_cast<char*>(block) + block->m_size*Alignment);
1201 bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
1202 (void)next_block_prev_allocated;
1203 BOOST_ASSERT(allocated == next_block_prev_allocated);
1204 }
1205 #endif
1206 return allocated;
1207 }
1208
1209 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1210 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
1211 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1212 {
1213 if(block->m_prev_allocated){
1214 return true;
1215 }
1216 else{
1217 #ifndef NDEBUG
1218 if(block != priv_first_block()){
1219 block_ctrl *prev = priv_prev_block(block);
1220 (void)prev;
1221 BOOST_ASSERT(!prev->m_allocated);
1222 BOOST_ASSERT(prev->m_size == block->m_prev_size);
1223 }
1224 #endif
1225 return false;
1226 }
1227 }
1228
1229 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1230 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
1231 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1232 {
1233 block->m_allocated = 1;
1234 move_detail::force_ptr<block_ctrl*>
1235 (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
1236 }
1237
1238 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1239 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
1240 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1241 {
1242 block->m_allocated = 0;
1243 block_ctrl *next_block = priv_next_block(block);
1244 next_block->m_prev_allocated = 0;
1245 next_block->m_prev_size = block->m_size;
1246 }
1247
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   //Carves nunits out of the free block chosen by the caller, splitting
   //off the remainder as a new free block when it can hold at least a
   //minimal block. Returns the user buffer pointer and stores the
   //usable byte count in received_size.
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //The block is big enough to split: the first part is allocated,
      //the remainder becomes a new free block
      size_type block_old_size = block->m_size;
      block->m_size = nunits & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //Construct the remainder block header right past the carved units
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Replace the original entry in the size-ordered free set with the
      //remainder block, reusing the erase position as insertion hint
      m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
   }
   else if (block->m_size >= nunits){
      //Too small to split: allocate the whole block
      m_header.m_imultiset.erase(it_old);
   }
   else{
      //The caller guaranteed the block fits, so this is unreachable
      BOOST_ASSERT(0);
      return 0;
   }

   //Account the whole block (control data included) as allocated and
   //report the bytes the user may actually write
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block allocated (also flags the next block's header)
   priv_mark_as_allocated_block(block);

   //Zero the bytes of the header that held the intrusive tree hook: they
   //are handed to the user and must not leak internal pointers
   TreeHook *t = static_cast<TreeHook*>(block);
   //Offset of the hook inside the block header
   std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);

   char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   //m_prev_size of the next block is only meaningful while this block is
   //free; clear it so stale data is not exposed
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
1305
1306 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1307 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
1308 {
1309 if(!addr) return;
1310
1311 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
1312
1313 return this->priv_deallocate(addr);
1314 }
1315
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   //Returns the block holding addr to the free set, coalescing it with
   //free neighbours on both sides. Caller must already hold the mutex.
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check if alignment is right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used-memory accounting
   m_header.m_allocated -= block_old_size;

   //The block that will be inserted in the free set (moves backwards if
   //we coalesce with the previous block)
   block_ctrl *block_to_insert = block;

   //Decide which neighbours can be merged
   block_ctrl *const next_block = priv_next_block(block);
   const bool merge_with_prev = !priv_is_prev_allocated(block);
   const bool merge_with_next = !priv_is_allocated_block(next_block);

   //Merge logic: grow the surviving block and remove absorbed blocks
   //from the size-ordered free set (their size keys become stale)
   if(merge_with_prev || merge_with_next){

      //Coalesce backwards with a free previous block
      if(merge_with_prev){
         //The previous block absorbs this one and must be reinserted
         //because its size (the multiset key) changes
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
      }

      //Coalesce forwards with a free next block
      if(merge_with_next){
         block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         m_header.m_imultiset.erase(next_it);
      }
   }
   //Mark the surviving block free (publishes its size to the next
   //block's header) and put it back into the free set
   priv_mark_as_free_block(block_to_insert);
   m_header.m_imultiset.insert(*block_to_insert);
}
1364
1365 #endif
1366
1367 }
1368 }
1369
1370 #include <boost/interprocess/detail/config_end.hpp>
1371
1372 #endif