#include "config.h"  // IWYU pragma: keep

#include "iothread.h"

#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include <atomic>
#include <condition_variable>
#include <functional>
#include <queue>
#include <thread>

#include "common.h"
#include "fds.h"
#include "flog.h"
#include "global_safety.h"
#include "wutil.h"

// We just define a thread limit of 1024.
// On all systems I've seen the limit is higher,
// but on some (like Linux with glibc) the setting for _POSIX_THREAD_THREADS_MAX is 64,
// which is too low, even though the system can handle more than 64 threads.
#define IO_MAX_THREADS 1024

// Values for the wakeup bytes sent to the ioport.
#define IO_SERVICE_MAIN_THREAD_REQUEST_QUEUE 99
#define IO_SERVICE_RESULT_QUEUE 100
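// Whichever byte is sent, the main thread simply drains the notify pipe in
// iothread_service_completion() and then services both queues, so the particular values are
// informational only.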

// The amount of time an IO thread may hang around to service requests, in milliseconds.
#define IO_WAIT_FOR_WORK_DURATION_MS 500

static void iothread_service_main_thread_requests();
static void iothread_service_result_queue();

using void_function_t = std::function<void()>;
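
/// A work item for the thread pool: a handler which runs on a background thread, plus an optional
/// completion which is enqueued back to the main thread once the handler finishes.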
struct work_request_t {
    void_function_t handler;
    void_function_t completion;

    work_request_t(void_function_t &&f, void_function_t &&comp)
        : handler(std::move(f)), completion(std::move(comp)) {}

    // Move-only
    work_request_t &operator=(const work_request_t &) = delete;
    work_request_t &operator=(work_request_t &&) = default;
    work_request_t(const work_request_t &) = delete;
    work_request_t(work_request_t &&) = default;
};
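
/// A request to execute a function on the main thread. These are always stack-allocated on the
/// requesting thread, which blocks until `done` becomes true.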
struct main_thread_request_t {
    relaxed_atomic_bool_t done{false};
    void_function_t func;

    explicit main_thread_request_t(void_function_t &&f) : func(std::move(f)) {}

    // No moving OR copying.
    // main_thread_requests are always stack allocated, and we deal in pointers to them.
    void operator=(const main_thread_request_t &) = delete;
    main_thread_request_t(const main_thread_request_t &) = delete;
    main_thread_request_t(main_thread_request_t &&) = delete;
};
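
/// A pool of threads which run work requests in the background, growing on demand up to
/// max_threads and letting idle threads time out and exit.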
struct thread_pool_t {
    struct data_t {
        /// The queue of outstanding, unclaimed requests.
        std::queue<work_request_t> request_queue{};

        /// The number of threads that exist in the pool.
        size_t total_threads{0};

        /// The number of threads which are waiting for more work.
        size_t waiting_threads{0};

        /// A flag indicating we should not process new requests.
        bool drain{false};
    };

    /// Data which needs to be atomically accessed.
    owning_lock<data_t> req_data{};

    /// The condition variable used to wake up waiting threads.
    /// Note this is tied to data's lock.
    std::condition_variable queue_cond{};

    /// The minimum and maximum number of threads.
    /// Here "minimum" means threads that are kept waiting in the pool.
    /// Note that the pool is initially empty and threads may decide to exit based on a time wait.
    const size_t soft_min_threads;
    const size_t max_threads;

    /// Construct with a soft minimum and maximum thread count.
    thread_pool_t(size_t soft_min_threads, size_t max_threads)
        : soft_min_threads(soft_min_threads), max_threads(max_threads) {}

    /// Enqueue a new work item onto the thread pool.
    /// The function \p func will execute in one of the pool's threads.
    /// \p completion will run on the main thread, if it is not missing.
    /// If \p cant_wait is set, disrespect the thread limit, because extant threads may
    /// want to wait for new threads.
    int perform(void_function_t &&func, void_function_t &&completion, bool cant_wait);

   private:
    /// The worker loop for this thread.
    void *run();

    /// Dequeue a work item (perhaps waiting on the condition variable), or commit to exiting by
    /// reducing the active thread count.
    /// This runs in the background thread.
    maybe_t<work_request_t> dequeue_work_or_commit_to_exit();

    /// Trampoline function for pthread_spawn compatibility.
    static void *run_trampoline(void *vpool);

    /// Attempt to spawn a new pthread.
    bool spawn() const;

    /// No copying or moving.
    thread_pool_t(const thread_pool_t &) = delete;
    thread_pool_t(thread_pool_t &&) = delete;
    void operator=(const thread_pool_t &) = delete;
    void operator=(thread_pool_t &&) = delete;
};

/// The thread pool for "iothreads" which are used to lift I/O off of the main thread.
/// These are used for completions, etc.
/// Leaked to avoid shutdown dtor registration (including tsan).
static thread_pool_t &s_io_thread_pool = *(new thread_pool_t(1, IO_MAX_THREADS));
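
/// The queue of completion functions which have finished on a background thread and are waiting to
/// be run on the main thread by iothread_service_result_queue().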
static owning_lock<std::queue<void_function_t>> s_result_queue;

// "Do on main thread" support.
static std::mutex s_main_thread_performer_lock;  // protects the main thread requests
// Notified when a main thread request has been completed (i.e. its `done` flag was set).
static std::condition_variable s_main_thread_performer_cond;

/// The queue of main thread requests. This queue contains pointers to structs that are
/// stack-allocated on the requesting thread.
static owning_lock<std::queue<main_thread_request_t *>> s_main_thread_request_queue;

// Pipes used for notifying.
struct notify_pipes_t {
    int read;
    int write;
};

/// \return the (immortal) set of pipes used for notifying of completions.
static const notify_pipes_t &get_notify_pipes() {
    static const notify_pipes_t s_notify_pipes = [] {
        auto pipes = make_autoclose_pipes();
        if (!pipes) {
            DIE_WITH_ERRNO("Unable to create iothread notify pipes");
        }
        // Mark both ends as non-blocking.
        if (make_fd_nonblocking(pipes->read.fd())) wperror(L"fcntl");
        if (make_fd_nonblocking(pipes->write.fd())) wperror(L"fcntl");
        return notify_pipes_t{pipes->read.acquire(), pipes->write.acquire()};
    }();
    return s_notify_pipes;
}

/// Dequeue a work item (perhaps waiting on the condition variable), or commit to exiting by
/// reducing the active thread count.
maybe_t<work_request_t> thread_pool_t::dequeue_work_or_commit_to_exit() {
    auto data = this->req_data.acquire();
    // If the queue is empty, check to see if we should wait.
    // We should wait if our exiting would drop us below the soft min.
    if (data->request_queue.empty() && data->total_threads == this->soft_min_threads) {
        data->waiting_threads += 1;
        this->queue_cond.wait_for(data.get_lock(),
                                  std::chrono::milliseconds(IO_WAIT_FOR_WORK_DURATION_MS));
        data->waiting_threads -= 1;
    }

    // Now that we've perhaps waited, see if there's something on the queue.
    maybe_t<work_request_t> result{};
    if (!data->request_queue.empty()) {
        result = std::move(data->request_queue.front());
        data->request_queue.pop();
    }
    // If we are returning none, then ensure we balance the thread count increment from when we were
    // created. This has to be done here in this awkward place because we've already committed to
    // exiting - we will never pick up more work. So we need to ensure we decrement the thread count
    // while holding the lock as we are effectively exited.
    if (!result) {
        data->total_threads -= 1;
    }
    return result;
}
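
/// Push a completion onto the result queue and write a wakeup byte to the notify pipe so the main
/// thread knows to service it.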
static void enqueue_thread_result(void_function_t req) {
    s_result_queue.acquire()->push(std::move(req));
    const char wakeup_byte = IO_SERVICE_RESULT_QUEUE;
    int notify_fd = get_notify_pipes().write;
    assert_with_errno(write_loop(notify_fd, &wakeup_byte, sizeof wakeup_byte) != -1);
}

static void *this_thread() { return (void *)(intptr_t)pthread_self(); }

void *thread_pool_t::run() {
    while (auto req = dequeue_work_or_commit_to_exit()) {
        FLOGF(iothread, L"pthread %p got work", this_thread());

        // Perform the work
        req->handler();

        // If there's a completion handler, we have to enqueue it on the result queue.
        // Note we're using std::function's weirdo operator== here
        if (req->completion != nullptr) {
            // Enqueue the result, and tell the main thread about it.
            enqueue_thread_result(std::move(req->completion));
        }
    }
    FLOGF(iothread, L"pthread %p exiting", this_thread());
    return nullptr;
}

void *thread_pool_t::run_trampoline(void *pool) {
    assert(pool && "No thread pool given");
    return static_cast<thread_pool_t *>(pool)->run();
}

/// Spawn another thread. No lock is held when this is called.
bool thread_pool_t::spawn() const {
    return make_detached_pthread(&run_trampoline, const_cast<thread_pool_t *>(this));
}

int thread_pool_t::perform(void_function_t &&func, void_function_t &&completion, bool cant_wait) {
    assert(func && "Missing function");
    // Note we permit an empty completion.
    struct work_request_t req(std::move(func), std::move(completion));
    int local_thread_count = -1;
    auto &pool = s_io_thread_pool;
    bool spawn_new_thread = false;
    bool wakeup_thread = false;
    {
        // Lock around a local region.
        auto data = pool.req_data.acquire();
        data->request_queue.push(std::move(req));
        FLOGF(iothread, L"enqueuing work item (count is %lu)", data->request_queue.size());
        if (data->drain) {
            // Do nothing here.
        } else if (data->waiting_threads >= data->request_queue.size()) {
            // There are enough waiting threads; wake one up.
            wakeup_thread = true;
        } else if (cant_wait || data->total_threads < pool.max_threads) {
            // No threads are waiting but we can or must spawn a new thread.
            data->total_threads += 1;
            spawn_new_thread = true;
        }
        local_thread_count = data->total_threads;
    }

    // Kick off the thread if we decided to do so.
    if (wakeup_thread) {
        FLOGF(iothread, L"notifying a thread");
        pool.queue_cond.notify_one();
    }
    if (spawn_new_thread) {
        // Spawn a thread. If this fails, it means there's already a bunch of threads; it is very
        // unlikely that they are all on the verge of exiting, so one is likely to be ready to
        // handle extant requests. So we can ignore failure with some confidence.
        if (this->spawn()) {
            FLOGF(iothread, L"pthread spawned");
        } else {
            // We failed to spawn a thread; decrement the thread count.
            pool.req_data.acquire()->total_threads -= 1;
        }
    }
    return local_thread_count;
}

void iothread_perform_impl(void_function_t &&func, void_function_t &&completion, bool cant_wait) {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();
    s_io_thread_pool.perform(std::move(func), std::move(completion), cant_wait);
}
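
// Usage sketch: callers normally reach this through the iothread_perform() helper from iothread.h
// (used below in debounce_t::perform_impl) rather than calling it directly, e.g.:
//
//     iothread_perform([] { /* runs on a background thread */ });
//
// A completion, when one is supplied, is forwarded here and later executed on the main thread by
// iothread_service_completion().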

int iothread_port() { return get_notify_pipes().read; }

void iothread_service_completion() {
    ASSERT_IS_MAIN_THREAD();
    // Drain the read buffer, and then service completions.
    // The order is important.
    int port = iothread_port();
    char buff[256];
    while (read(port, buff, sizeof buff) > 0) {
        // pass
    }

    iothread_service_main_thread_requests();
    iothread_service_result_queue();
}

static bool iothread_wait_for_pending_completions(long timeout_usec) {
    const long usec_per_sec = 1000000;
    struct timeval tv;
    tv.tv_sec = timeout_usec / usec_per_sec;
    tv.tv_usec = timeout_usec % usec_per_sec;
    const int fd = iothread_port();

    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    int ret = select(fd + 1, &fds, nullptr, nullptr, &tv);
    return ret > 0;
}

void iothread_service_completion_with_timeout(long timeout_usec) {
    if (iothread_wait_for_pending_completions(timeout_usec)) {
        iothread_service_completion();
    }
}

/// At the moment, this function is only used in the test suite and in a
/// drain-all-threads-before-fork compatibility mode that no architecture requires, so it's OK that
/// it's terrible.
int iothread_drain_all() {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();

    int thread_count;
    auto &pool = s_io_thread_pool;
    // Set the drain flag.
    {
        auto data = pool.req_data.acquire();
        assert(!data->drain && "Should not be draining already");
        data->drain = true;
        thread_count = data->total_threads;
    }

    // Wake everyone up.
    pool.queue_cond.notify_all();

    double now = timef();

    // Nasty polling via select().
    while (pool.req_data.acquire()->total_threads > 0) {
        if (iothread_wait_for_pending_completions(1000)) {
            iothread_service_completion();
        }
    }

    // Clear the drain flag.
    // Even though we released the lock, nobody should have added a new thread while the drain flag
    // is set.
    {
        auto data = pool.req_data.acquire();
        assert(data->total_threads == 0 && "Should be no threads");
        assert(data->drain && "Should be draining");
        data->drain = false;
    }

    double after = timef();
    FLOGF(iothread, "Drained %d thread(s) in %.02f msec", thread_count, 1000 * (after - now));
    return thread_count;
}

/// "Do on main thread" support.
static void iothread_service_main_thread_requests() {
    ASSERT_IS_MAIN_THREAD();

    // Move the queue to a local variable.
    std::queue<main_thread_request_t *> request_queue;
    s_main_thread_request_queue.acquire()->swap(request_queue);

    if (!request_queue.empty()) {
        // Perform each of the functions. Note we are NOT responsible for deleting these. They are
        // stack allocated in their respective threads!
        while (!request_queue.empty()) {
            main_thread_request_t *req = request_queue.front();
            request_queue.pop();
            req->func();
            req->done = true;
        }

        // Ok, we've handled everybody. Announce the good news, and allow ourselves to be unlocked.
        // Note we must do this while holding the lock. Otherwise we race with the waiting threads:
        //
        // 1. waiting thread checks for done, sees false
        // 2. main thread performs request, sets done to true, posts to condition
        // 3. waiting thread unlocks lock, waits on condition (forever)
        //
        // Because the waiting thread performs step 1 under the lock, if we take the lock, we avoid
        // posting before the waiting thread is waiting.
        // TODO: revisit this logic, this feels sketchy.
        scoped_lock broadcast_lock(s_main_thread_performer_lock);
        s_main_thread_performer_cond.notify_all();
    }
}

// Service the queue of results.
static void iothread_service_result_queue() {
    // Move the queue to a local variable.
    std::queue<void_function_t> result_queue;
    s_result_queue.acquire()->swap(result_queue);

    // Perform each completion in order.
    while (!result_queue.empty()) {
        void_function_t req(std::move(result_queue.front()));
        result_queue.pop();
        // Ensure we don't invoke empty functions; doing so would throw std::bad_function_call.
        if (req != nullptr) {
            req();
        }
    }
}

void iothread_perform_on_main(void_function_t &&func) {
    if (is_main_thread()) {
        func();
        return;
    }

    // Make a new request. Note we are synchronous, so this can be stack allocated!
    main_thread_request_t req(std::move(func));

    // Append it. Ensure we don't hold the lock after.
    s_main_thread_request_queue.acquire()->push(&req);

    // Tell the pipe.
    const char wakeup_byte = IO_SERVICE_MAIN_THREAD_REQUEST_QUEUE;
    int notify_fd = get_notify_pipes().write;
    assert_with_errno(write_loop(notify_fd, &wakeup_byte, sizeof wakeup_byte) != -1);

    // Wait on the condition, until we're done.
    std::unique_lock<std::mutex> perform_lock(s_main_thread_performer_lock);
    while (!req.done) {
        // It would be nice to support checking for cancellation here, but the clients need a
        // deterministic way to clean up to avoid leaks.
        s_main_thread_performer_cond.wait(perform_lock);
    }

    // Ok, the request must now be done.
    assert(req.done);
}
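
// Usage sketch: a background thread that needs a value only the main thread can compute may block
// on this call, e.g.:
//
//     int value = 0;
//     iothread_perform_on_main([&] { value = compute_on_main_thread(); });  // hypothetical helper
//     // 'value' is now safe to use; the call is synchronous, so the lambda has already run.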

bool make_detached_pthread(void *(*func)(void *), void *param) {
    // The spawned thread inherits our signal mask. We don't want signals to ever be delivered on
    // the spawned thread, so temporarily block all signals, spawn the thread, and then restore
    // the mask.
    sigset_t new_set, saved_set;
    sigfillset(&new_set);
    DIE_ON_FAILURE(pthread_sigmask(SIG_BLOCK, &new_set, &saved_set));

    // Spawn a thread. If this fails, it means there's already a bunch of threads; it is very
    // unlikely that they are all on the verge of exiting, so one is likely to be ready to handle
    // extant requests. So we can ignore failure with some confidence.
    pthread_t thread = 0;
    int err = pthread_create(&thread, nullptr, func, param);
    if (err == 0) {
        // Success, return the thread.
        FLOGF(iothread, "pthread %p spawned", (void *)(intptr_t)thread);
        DIE_ON_FAILURE(pthread_detach(thread));
    } else {
        perror("pthread_create");
    }
    // Restore our sigmask.
    DIE_ON_FAILURE(pthread_sigmask(SIG_SETMASK, &saved_set, nullptr));
    return err == 0;
}

using void_func_t = std::function<void(void)>;

static void *func_invoker(void *param) {
    // Acquire a thread id for this thread.
    (void)thread_id();
    auto vf = static_cast<void_func_t *>(param);
    (*vf)();
    delete vf;
    return nullptr;
}

bool make_detached_pthread(void_func_t &&func) {
    // Move the function into a heap allocation.
    auto vf = new void_func_t(std::move(func));
    if (make_detached_pthread(func_invoker, vf)) {
        return true;
    }
    // Thread spawning failed, clean up our heap allocation.
    delete vf;
    return false;
}

static uint64_t next_thread_id() {
    // Note 0 is an invalid thread id.
    static owning_lock<uint64_t> s_last_thread_id{};
    auto tid = s_last_thread_id.acquire();
    return ++*tid;
}
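
/// \return a unique, nonzero id for the calling thread, assigned on first use and cached in a
/// thread-local.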
uint64_t thread_id() {
    static FISH_THREAD_LOCAL uint64_t tl_tid = next_thread_id();
    return tl_tid;
}

// Debounce implementation note: we would like to enqueue at most one request, except if a thread
// hangs (e.g. on fs access) then we do not want to block indefinitely; such threads are called
// "abandoned". This is implemented via a monotone uint64 counter, called a token.
// Every time we spawn a thread, increment the token. When the thread is completed, it compares its
// token to the active token; if they differ then this thread was abandoned.
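//
// Usage sketch (hypothetical caller; debounce_t itself is declared in iothread.h): a debounce_t is
// kept around and fed rapid-fire requests, of which only the newest unstarted one is serviced:
//
//     static debounce_t s_reader_debouncer(500);  // abandon a hung thread after 500 msec
//     s_reader_debouncer.perform_impl([] { recompute_state(); },  // runs on a background thread
//                                     [] { apply_state(); });     // runs later on the main thread
//
// (perform_impl, or whatever public wrapper forwards to it, takes exactly this handler/completion
// pair.) If the running thread exceeds the timeout, it is abandoned and a fresh thread picks up
// the newest request.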
struct debounce_t::impl_t {
    // Synchronized data from debounce_t.
    struct data_t {
        // The (at most 1) next enqueued request, or none if none.
        maybe_t<work_request_t> next_req{};

        // The token of the current non-abandoned thread, or 0 if no thread is running.
        uint64_t active_token{0};

        // The next token to use when spawning a thread.
        uint64_t next_token{1};

        // The start time of the most recently run thread spawn, or request (if any).
        std::chrono::time_point<std::chrono::steady_clock> start_time{};
    };
    owning_lock<data_t> data{};

    /// Run an iteration in the background, with the given thread token.
    /// \return true if we handled a request, false if there were none.
    bool run_next(uint64_t token);
};

bool debounce_t::impl_t::run_next(uint64_t token) {
    assert(token > 0 && "Invalid token");
    // Note we are on a background thread.
    maybe_t<work_request_t> req;
    {
        auto d = data.acquire();
        if (d->next_req) {
            // The value was dequeued, we are going to execute it.
            req = d->next_req.acquire();
            d->start_time = std::chrono::steady_clock::now();
        } else {
            // There is no request. If we are active, mark ourselves as no longer running.
            if (token == d->active_token) {
                d->active_token = 0;
            }
            return false;
        }
    }

    assert(req && req->handler && "Request should have value");
    req->handler();
    if (req->completion) {
        enqueue_thread_result(std::move(req->completion));
    }
    return true;
}
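
/// Enqueue a new request, replacing any request that has not yet started. Spawns a background
/// thread if none is active, or if the active one has exceeded the timeout and is abandoned.
/// \return the token of the thread which will (eventually) service the request.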
uint64_t debounce_t::perform_impl(std::function<void()> handler, std::function<void()> completion) {
    uint64_t active_token{0};
    bool spawn{false};
    // Local lock.
    {
        auto d = impl_->data.acquire();
        d->next_req = work_request_t{std::move(handler), std::move(completion)};
        // If we have a timeout, and our running thread has exceeded it, abandon that thread.
        if (d->active_token && timeout_msec_ > 0 &&
            std::chrono::steady_clock::now() - d->start_time >
                std::chrono::milliseconds(timeout_msec_)) {
            // Abandon this thread by marking nothing as active.
            d->active_token = 0;
        }
        if (!d->active_token) {
            // We need to spawn a new thread.
            // Mark the current time so that a new request won't immediately abandon us.
            spawn = true;
            d->active_token = d->next_token++;
            d->start_time = std::chrono::steady_clock::now();
        }
        active_token = d->active_token;
        assert(active_token && "Something should be active");
    }
    if (spawn) {
        // Equip our background thread with a reference to impl, to keep it alive.
        auto impl = impl_;
        iothread_perform([=] {
            while (impl->run_next(active_token))
                ;  // pass
        });
    }
    return active_token;
}

debounce_t::debounce_t(long timeout_msec)
    : timeout_msec_(timeout_msec), impl_(std::make_shared<impl_t>()) {}

debounce_t::~debounce_t() = default;