//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2023 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#include <boost/asio/detail/concurrency_hint.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/scheduler.hpp>
#include <boost/asio/detail/scheduler_thread_info.hpp>
#include <boost/asio/detail/signal_blocker.hpp>

#if defined(BOOST_ASIO_HAS_IO_URING_AS_DEFAULT)
# include <boost/asio/detail/io_uring_service.hpp>
#else // defined(BOOST_ASIO_HAS_IO_URING_AS_DEFAULT)
# include <boost/asio/detail/reactor.hpp>
#endif // defined(BOOST_ASIO_HAS_IO_URING_AS_DEFAULT)

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

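// Function object executed by the scheduler's internally owned thread.
// The constructor below spawns such a thread when own_thread is true, so
// that the scheduler drives its own run() loop.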
class scheduler::thread_function
{
public:
  explicit thread_function(scheduler* s)
    : this_(s)
  {
  }

  void operator()()
  {
    boost::system::error_code ec;
    this_->run(ec);
  }

private:
  scheduler* this_;
};

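// Scope guard wrapped around task_->run() in do_run_one(), do_wait_one()
// and do_poll_one(). On destruction it publishes privately counted work
// to the shared outstanding_work_ counter, reacquires the lock, enqueues
// the operations produced by the task, and pushes task_operation_ back
// onto the queue so that the task will be run again.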
struct scheduler::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    scheduler_->task_interrupted_ = true;
    scheduler_->op_queue_.push(this_thread_->private_op_queue);
    scheduler_->op_queue_.push(&scheduler_->task_operation_);
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

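// Scope guard wrapped around handler completion. Each completed handler
// consumes one unit of outstanding work: the net change (work posted by
// the handler minus that one unit) is applied to the shared counter, or
// work_finished() is called when nothing new was posted so the scheduler
// can stop once the count reaches zero. Any privately queued operations
// are then pushed back onto the shared queue under the lock.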
struct scheduler::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      scheduler_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(BOOST_ASIO_HAS_THREADS)
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      scheduler_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

scheduler::scheduler(boost::asio::execution_context& ctx,
    int concurrency_hint, bool own_thread, get_task_func_type get_task)
  : boost::asio::detail::execution_context_service_base<scheduler>(ctx),
    one_thread_(concurrency_hint == 1
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_IO, concurrency_hint)),
    mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)),
    task_(0),
    get_task_(get_task),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    concurrency_hint_(concurrency_hint),
    thread_(0)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;

  if (own_thread)
  {
    ++outstanding_work_;
    boost::asio::detail::signal_blocker sb;
    thread_ = new boost::asio::detail::thread(thread_function(this));
  }
}

scheduler::~scheduler()
{
  if (thread_)
  {
    mutex::scoped_lock lock(mutex_);
    shutdown_ = true;
    stop_all_threads(lock);
    lock.unlock();
    thread_->join();
    delete thread_;
  }
}

void scheduler::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  if (thread_)
    stop_all_threads(lock);
  lock.unlock();

  // Join thread to ensure task operation is returned to queue.
  if (thread_)
  {
    thread_->join();
    delete thread_;
    thread_ = 0;
  }

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}

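// Lazily obtain the task (by default the reactor, or the io_uring service
// when BOOST_ASIO_HAS_IO_URING_AS_DEFAULT is defined; see
// get_default_task() below) and enqueue the task_operation_ marker so the
// run loop will execute it.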
void scheduler::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = get_task_(this->context());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}

std::size_t scheduler::run(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::run_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}

std::size_t scheduler::wait_one(long usec, boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_wait_one(lock, this_thread, usec, ec);
}

std::size_t scheduler::poll(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  std::size_t n = 0;
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::poll_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

void scheduler::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool scheduler::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void scheduler::restart()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

void scheduler::compensating_work_started()
{
  thread_info_base* this_thread = thread_call_stack::contains(this);
  BOOST_ASIO_ASSUME(this_thread != 0); // Only called from inside scheduler.
  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}

bool scheduler::can_dispatch()
{
  return thread_call_stack::contains(this) != 0;
}

void scheduler::capture_current_exception()
{
  if (thread_info_base* this_thread = thread_call_stack::contains(this))
    this_thread->capture_current_exception();
}

void scheduler::post_immediate_completion(
    scheduler::operation* op, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#else // defined(BOOST_ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(BOOST_ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_immediate_completions(std::size_t n,
    op_queue<scheduler::operation>& ops, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_outstanding_work
        += static_cast<long>(n);
      static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
      return;
    }
  }
#else // defined(BOOST_ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(BOOST_ASIO_HAS_THREADS)

  increment(outstanding_work_, static_cast<long>(n));
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(ops);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(BOOST_ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completions(
    op_queue<scheduler::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(BOOST_ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info_base* this_thread = thread_call_stack::contains(this))
      {
        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

void scheduler::do_dispatch(
    scheduler::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

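// Move the given operations into a local queue so that they are disposed
// of by the queue's destructor rather than being executed; their handlers
// are never invoked.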
void scheduler::abandon_operations(
    op_queue<scheduler::operation>& ops)
{
  op_queue<scheduler::operation> ops2;
  ops2.push(ops);
}

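// Run the event loop until one handler has been executed or the scheduler
// is stopped. Popping task_operation_ means it is this thread's turn to
// run the task (reactor or io_uring service), which blocks only when no
// other handlers are queued; any other operation is a ready handler and
// is completed outside the lock.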
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        if (more_handlers && !one_thread_)
          wakeup_event_.unlock_and_signal_one(lock);
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(this, ec, task_result);
        this_thread.rethrow_pending_exception();

        return 1;
      }
    }
    else
    {
      wakeup_event_.clear(lock);
      wakeup_event_.wait(lock);
    }
  }

  return 0;
}

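// Like do_run_one(), but waits at most usec microseconds in total, either
// on the wakeup event or inside the task, and returns 0 if no handler
// became ready within that time.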
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread, long usec,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == 0)
  {
    wakeup_event_.clear(lock);
    wakeup_event_.wait_for_usec(lock, usec);
    usec = 0; // Wait at most once.
    o = op_queue_.front();
  }

  if (o == &task_operation_)
  {
    op_queue_.pop();
    bool more_handlers = (!op_queue_.empty());

    task_interrupted_ = more_handlers;

    if (more_handlers && !one_thread_)
      wakeup_event_.unlock_and_signal_one(lock);
    else
      lock.unlock();

    {
      task_cleanup on_exit = { this, &lock, &this_thread };
      (void)on_exit;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      if (!one_thread_)
        wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);
  this_thread.rethrow_pending_exception();

  return 1;
}

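// Like do_run_one(), but never blocks: the task is run in polling mode
// (zero timeout) and at most one ready handler is executed.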
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(0, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);
  this_thread.rethrow_pending_exception();

  return 1;
}

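// Called with the lock held: mark the scheduler as stopped, wake every
// thread waiting on the wakeup event, and interrupt the task so that a
// thread blocked inside the reactor (or io_uring service) also returns.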
void scheduler::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;
  wakeup_event_.signal_all(lock);

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}

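// Wake a single idle thread, preferring one waiting on the wakeup event.
// If no thread is waiting there, interrupt the task instead so that the
// thread blocked inside it can pick up the newly queued work. The lock is
// always released on return.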
void scheduler::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

scheduler_task* scheduler::get_default_task(boost::asio::execution_context& ctx)
{
#if defined(BOOST_ASIO_HAS_IO_URING_AS_DEFAULT)
  return &use_service<io_uring_service>(ctx);
#else // defined(BOOST_ASIO_HAS_IO_URING_AS_DEFAULT)
  return &use_service<reactor>(ctx);
#endif // defined(BOOST_ASIO_HAS_IO_URING_AS_DEFAULT)
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
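
// A minimal usage sketch, assuming the public boost::asio::io_context
// front end (which, except on builds using the Windows IOCP backend,
// forwards its run(), poll() and stop() calls to this scheduler):
//
//   #include <boost/asio.hpp>
//   #include <iostream>
//
//   int main()
//   {
//     // A concurrency hint of 1 enables the one_thread_ fast paths seen
//     // above (thread-private queues, fewer cross-thread wakeups).
//     boost::asio::io_context io(1);
//
//     boost::asio::post(io, []{ std::cout << "handler run\n"; });
//
//     // Drives scheduler::run(): executes queued handlers and runs the
//     // task until no outstanding work remains.
//     io.run();
//   }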