#include "config.h"  // IWYU pragma: keep

#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

#include "common.h"
#include "iothread.h"
#include "wutil.h"

#ifdef _POSIX_THREAD_THREADS_MAX
#if _POSIX_THREAD_THREADS_MAX < 64
#define IO_MAX_THREADS _POSIX_THREAD_THREADS_MAX
#endif
#endif

#ifndef IO_MAX_THREADS
#define IO_MAX_THREADS 64
#endif

// Values for the wakeup bytes sent to the ioport.
#define IO_SERVICE_MAIN_THREAD_REQUEST_QUEUE 99
#define IO_SERVICE_RESULT_QUEUE 100

static void iothread_service_main_thread_requests(void);
static void iothread_service_result_queue();

typedef std::function<void(void)> void_function_t;

struct spawn_request_t {
    void_function_t handler;
    void_function_t completion;

    spawn_request_t() {}

    spawn_request_t(void_function_t &&f, void_function_t &&comp)
        : handler(std::move(f)), completion(std::move(comp)) {}

    // Move-only.
    spawn_request_t &operator=(const spawn_request_t &) = delete;
    spawn_request_t &operator=(spawn_request_t &&) = default;
    spawn_request_t(const spawn_request_t &) = delete;
    spawn_request_t(spawn_request_t &&) = default;
};

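// Requests travel through the queues via std::move because the type is move-only. An empty
// completion (a default-constructed std::function) compares equal to nullptr, which is how the
// worker decides whether anything needs to run back on the main thread. An illustrative sketch
// (do_work is a hypothetical stand-in for real work):
//
//     spawn_request_t r([] { do_work(); }, void_function_t());  // no completion handler
//     assert(r.completion == nullptr);  // an empty std::function compares equal to nullptr
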
struct main_thread_request_t {
    volatile bool done = false;
    void_function_t func;

    main_thread_request_t(void_function_t &&f) : func(std::move(f)) {}

    // No moving OR copying: main_thread_requests are always stack allocated, and we deal in
    // pointers to them.
    void operator=(const main_thread_request_t &) = delete;
    main_thread_request_t(const main_thread_request_t &) = delete;
    main_thread_request_t(main_thread_request_t &&) = delete;
};

// Spawn support. Requests are allocated and come in on request_queue and go out on result_queue.
struct thread_data_t {
    std::queue<spawn_request_t> request_queue;
    int thread_count = 0;
};
static owning_lock<thread_data_t> s_spawn_requests;
static owning_lock<std::queue<spawn_request_t>> s_result_queue;

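// owning_lock (declared in common.h) couples a mutex with the data it guards: acquire() returns
// a locker object whose .value member is the protected data, and the mutex is released when the
// locker goes out of scope. A minimal sketch of the idea, for orientation only (the real
// implementation lives in common.h and may differ):
//
//     template <typename T>
//     struct owning_lock_sketch {
//         struct locked_t {
//             std::unique_lock<std::mutex> lock;
//             T &value;
//         };
//         locked_t acquire() { return locked_t{std::unique_lock<std::mutex>(m), data}; }
//
//        private:
//         std::mutex m;
//         T data;
//     };
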
// "Do on main thread" support.
static std::mutex s_main_thread_performer_lock;  // protects the main thread requests
static std::condition_variable s_main_thread_performer_cond;  // signals when requests are done
static std::mutex s_main_thread_request_q_lock;  // protects the queue
static std::queue<main_thread_request_t *> s_main_thread_request_queue;

// Notifying pipes.
static int s_read_pipe, s_write_pipe;

static void iothread_init(void) {
    static bool inited = false;
    if (!inited) {
        inited = true;

        // Initialize the completion pipes.
        int pipes[2] = {0, 0};
        assert_with_errno(pipe(pipes) != -1);
        s_read_pipe = pipes[0];
        s_write_pipe = pipes[1];

        set_cloexec(s_read_pipe);
        set_cloexec(s_write_pipe);
    }
}

static bool dequeue_spawn_request(spawn_request_t *result) {
    auto &&locker = s_spawn_requests.acquire();
    thread_data_t &td = locker.value;
    if (!td.request_queue.empty()) {
        *result = std::move(td.request_queue.front());
        td.request_queue.pop();
        return true;
    }
    return false;
}

static void enqueue_thread_result(spawn_request_t req) {
    s_result_queue.acquire().value.push(std::move(req));
}

static void *this_thread() { return (void *)(intptr_t)pthread_self(); }

/// The function that does thread work.
static void *iothread_worker(void *unused) {
    UNUSED(unused);
    struct spawn_request_t req;
    while (dequeue_spawn_request(&req)) {
        debug(5, "pthread %p dequeued", this_thread());

        // Perform the work.
        req.handler();

        // If there's a completion handler, we have to enqueue it on the result queue. Note that
        // comparing a std::function against nullptr tests whether the function is empty.
        if (req.completion != nullptr) {
            // Enqueue the result, and tell the main thread about it.
            enqueue_thread_result(std::move(req));
            const char wakeup_byte = IO_SERVICE_RESULT_QUEUE;
            assert_with_errno(write_loop(s_write_pipe, &wakeup_byte, sizeof wakeup_byte) != -1);
        }
    }

    // We believe we have exhausted the thread request queue. We want to decrement thread_count
    // and exit. But it's possible that a request just came in. Furthermore, it's possible that
    // the main thread saw that the thread count was at its maximum and decided not to spawn a
    // new thread, trusting an existing thread to handle the request. But we've already committed
    // to not handling anything else. Therefore we have to decrement the thread count under the
    // lock, which we reacquire here; likewise, the main thread must check the value under the
    // lock.
    int new_thread_count = --s_spawn_requests.acquire().value.thread_count;
    assert(new_thread_count >= 0);

    debug(5, "pthread %p exiting", this_thread());
    // We're done.
    return NULL;
}

/// Spawn another thread. No lock is held when this is called.
static void iothread_spawn() {
    // The spawned thread inherits our signal mask. We don't want the spawned thread to ever
    // receive signals, so temporarily block all signals, spawn the thread, and then restore the
    // mask.
    sigset_t new_set, saved_set;
    sigfillset(&new_set);
    DIE_ON_FAILURE(pthread_sigmask(SIG_BLOCK, &new_set, &saved_set));

    // Spawn a thread. If this fails, it means there are already a bunch of threads; it is very
    // unlikely that they are all on the verge of exiting, so one is likely to be ready to handle
    // extant requests. So we can ignore failure with some confidence.
    pthread_t thread = 0;
    pthread_create(&thread, NULL, iothread_worker, NULL);

    // We will never join this thread.
    DIE_ON_FAILURE(pthread_detach(thread));
    debug(5, "pthread %p spawned", (void *)(intptr_t)thread);
    // Restore our sigmask.
    DIE_ON_FAILURE(pthread_sigmask(SIG_SETMASK, &saved_set, NULL));
}

int iothread_perform_impl(void_function_t &&func, void_function_t &&completion) {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();
    iothread_init();

    struct spawn_request_t req(std::move(func), std::move(completion));
    int local_thread_count = -1;
    bool spawn_new_thread = false;
    {
        // Lock around a local region.
        auto &&locker = s_spawn_requests.acquire();
        thread_data_t &td = locker.value;
        td.request_queue.push(std::move(req));
        if (td.thread_count < IO_MAX_THREADS) {
            td.thread_count++;
            spawn_new_thread = true;
        }
        local_thread_count = td.thread_count;
    }

    // Kick off the thread if we decided to do so.
    if (spawn_new_thread) {
        iothread_spawn();
    }
    return local_thread_count;
}

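// For orientation, a main-thread caller might schedule background work like the following sketch
// (expensive_io and update_ui are hypothetical stand-ins; real callers typically go through the
// iothread_perform wrappers declared in iothread.h):
//
//     iothread_perform_impl([] { expensive_io(); },  // runs on a background thread
//                           [] { update_ui(); });    // runs later on the main thread
//
// The handler runs on the pool thread; the completion is shipped back over the result queue and
// runs on the main thread the next time it services the ioport.
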
int iothread_port(void) {
    iothread_init();
    return s_read_pipe;
}

void iothread_service_completion(void) {
    ASSERT_IS_MAIN_THREAD();
    char wakeup_byte;

    assert_with_errno(read_loop(iothread_port(), &wakeup_byte, sizeof wakeup_byte) == 1);
    if (wakeup_byte == IO_SERVICE_MAIN_THREAD_REQUEST_QUEUE) {
        iothread_service_main_thread_requests();
    } else if (wakeup_byte == IO_SERVICE_RESULT_QUEUE) {
        iothread_service_result_queue();
    } else {
        debug(0, "Unknown wakeup byte %02x in %s", wakeup_byte, __FUNCTION__);
    }
}

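// A host event loop is expected to watch iothread_port() for readability and call
// iothread_service_completion() when a wakeup byte arrives. A minimal sketch, using select() as
// the rest of this file does:
//
//     int fd = iothread_port();
//     fd_set fds;
//     FD_ZERO(&fds);
//     FD_SET(fd, &fds);
//     if (select(fd + 1, &fds, NULL, NULL, NULL /* block */) > 0 && FD_ISSET(fd, &fds)) {
//         iothread_service_completion();  // reads one wakeup byte and dispatches on it
//     }
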
static bool iothread_wait_for_pending_completions(long timeout_usec) {
    const long usec_per_sec = 1000000;
    struct timeval tv;
    tv.tv_sec = timeout_usec / usec_per_sec;
    tv.tv_usec = timeout_usec % usec_per_sec;
    const int fd = iothread_port();

    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    int ret = select(fd + 1, &fds, NULL, NULL, &tv);
    return ret > 0;
}

/// Note that this function is quite sketchy. In particular, it drains threads, not requests,
/// meaning that it may leave requests on the queue. This is the desired behavior (it may be
/// called before fork, and we don't want to bother servicing requests before we fork), but in the
/// test suite we depend on it draining all requests. In practice this works, because a thread
/// won't exit while there are outstanding requests.
///
/// At the moment, this function is only used in the test suite and in a
/// drain-all-threads-before-fork compatibility mode that no architecture requires, so it's OK
/// that it's terrible.
void iothread_drain_all(void) {
    ASSERT_IS_MAIN_THREAD();
    ASSERT_IS_NOT_FORKED_CHILD();

#define TIME_DRAIN 0
#if TIME_DRAIN
    int thread_count = s_spawn_requests.acquire().value.thread_count;
    double now = timef();
#endif

    // Nasty polling via select().
    while (s_spawn_requests.acquire().value.thread_count > 0) {
        if (iothread_wait_for_pending_completions(1000)) {
            iothread_service_completion();
        }
    }

#if TIME_DRAIN
    double after = timef();
    fwprintf(stdout, L"(Waited %.02f msec for %d thread(s) to drain)\n", 1000 * (after - now),
             thread_count);
#endif
}

/// "Do on main thread" support.
|
|
|
|
static void iothread_service_main_thread_requests(void) {
|
2013-11-28 08:04:12 +08:00
|
|
|
ASSERT_IS_MAIN_THREAD();
|
|
|
|
|
2016-05-02 12:01:00 +08:00
|
|
|
// Move the queue to a local variable.
|
2017-01-24 01:34:30 +08:00
|
|
|
std::queue<main_thread_request_t *> request_queue;
|
2013-11-28 08:04:12 +08:00
|
|
|
{
|
2016-11-03 12:54:57 +08:00
|
|
|
scoped_lock queue_lock(s_main_thread_request_q_lock);
|
2017-01-27 08:14:50 +08:00
|
|
|
request_queue.swap(s_main_thread_request_queue);
|
2013-11-28 08:04:12 +08:00
|
|
|
}
|
|
|
|
|
2016-05-02 12:01:00 +08:00
|
|
|
if (!request_queue.empty()) {
|
|
|
|
// Perform each of the functions. Note we are NOT responsible for deleting these. They are
|
|
|
|
// stack allocated in their respective threads!
|
|
|
|
while (!request_queue.empty()) {
|
2017-01-24 01:34:30 +08:00
|
|
|
main_thread_request_t *req = request_queue.front();
|
2013-11-28 08:04:12 +08:00
|
|
|
request_queue.pop();
|
2017-01-24 02:37:16 +08:00
|
|
|
req->func();
|
2013-11-28 08:04:12 +08:00
|
|
|
req->done = true;
|
|
|
|
}
|
|
|
|
|
2016-05-02 12:01:00 +08:00
|
|
|
// Ok, we've handled everybody. Announce the good news, and allow ourselves to be unlocked.
|
|
|
|
// Note we must do this while holding the lock. Otherwise we race with the waiting threads:
|
|
|
|
//
|
|
|
|
// 1. waiting thread checks for done, sees false
|
|
|
|
// 2. main thread performs request, sets done to true, posts to condition
|
|
|
|
// 3. waiting thread unlocks lock, waits on condition (forever)
|
|
|
|
//
|
|
|
|
// Because the waiting thread performs step 1 under the lock, if we take the lock, we avoid
|
|
|
|
// posting before the waiting thread is waiting.
|
2016-07-21 13:30:58 +08:00
|
|
|
scoped_lock broadcast_lock(s_main_thread_performer_lock);
|
2017-08-19 03:26:35 +08:00
|
|
|
s_main_thread_performer_cond.notify_all();
|
2013-11-28 08:04:12 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-24 01:56:02 +08:00
|
|
|
// Service the queue of results.
static void iothread_service_result_queue() {
    // Move the queue to a local variable.
    std::queue<spawn_request_t> result_queue;
    s_result_queue.acquire().value.swap(result_queue);

    // Perform each completion in order.
    while (!result_queue.empty()) {
        spawn_request_t req(std::move(result_queue.front()));
        result_queue.pop();
        // Ensure we don't invoke empty functions; doing so would throw std::bad_function_call.
        if (req.completion != nullptr) {
            req.completion();
        }
    }
}

void iothread_perform_on_main(void_function_t &&func) {
    if (is_main_thread()) {
        func();
        return;
    }

    // Make a new request. Note we are synchronous, so this can be stack allocated!
    main_thread_request_t req(std::move(func));

    // Append it. Do not delete the nested scope as it is crucial to the proper functioning of
    // this code by virtue of the lock management.
    {
        scoped_lock queue_lock(s_main_thread_request_q_lock);
        s_main_thread_request_queue.push(&req);
    }

    // Tell the pipe.
    const char wakeup_byte = IO_SERVICE_MAIN_THREAD_REQUEST_QUEUE;
    assert_with_errno(write_loop(s_write_pipe, &wakeup_byte, sizeof wakeup_byte) != -1);

    // Wait on the condition, until we're done.
    std::unique_lock<std::mutex> perform_lock(s_main_thread_performer_lock);
    while (!req.done) {
        // It would be nice to support checking for cancellation here, but the clients need a
        // deterministic way to clean up to avoid leaks.
        s_main_thread_performer_cond.wait(perform_lock);
    }

    // Ok, the request must now be done.
    assert(req.done);
}
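
// A background thread that needs to touch main-thread-only state can block on this call. A small
// illustrative sketch (s_some_main_thread_state is a hypothetical variable owned by the main
// thread):
//
//     iothread_perform_on_main([&] { s_some_main_thread_state += 1; });
//     // By the time this returns, the increment has run on the main thread.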