mirror of https://github.com/fish-shell/fish-shell.git (synced 2024-11-28 12:13:55 +08:00)
Remove support for blocking children
This removes support for blocking children via signals, which was used to orchestrate processes on WSL. Now we use the keepalive mechanism instead.
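For context, the blocking approach being removed follows the classic SIGSTOP/SIGCONT hand-off pattern. The sketch below is illustrative only (it is not fish-shell code, and all names in it are made up): the parent forks a child, the child stops itself so the parent can finish wiring up pipes and process groups, and the parent resumes it once setup is done.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main() {
    pid_t pid = fork();
    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid == 0) {
        // Child: stop immediately so the parent can join process groups and open pipes first.
        kill(getpid(), SIGSTOP);
        // We are resumed by the parent's SIGCONT; now it is safe to exec.
        execlp("echo", "echo", "child resumed", (char *)nullptr);
        _exit(127);
    }
    // Parent: wait until the child has actually stopped (WUNTRACED reports stops).
    // Sending SIGCONT before the child is stopped risks leaving it suspended forever.
    int status = 0;
    while (waitpid(pid, &status, WUNTRACED) == -1 && errno == EINTR) {
        // Retry if interrupted by a signal.
    }
    // ... the real orchestration work would happen here ...
    kill(pid, SIGCONT);        // unblock the child
    waitpid(pid, &status, 0);  // reap it
    return 0;
}

The keepalive mechanism the commit message refers to avoids this dance: roughly, fish keeps a placeholder process alive so the job's process group persists for later children to join, instead of stopping and resuming each child.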
This commit is contained in:
parent 080521071f
commit 0c18f68cc2
src/exec.cpp (104 changed lines)
@@ -53,8 +53,6 @@
 /// Base open mode to pass to calls to open.
 #define OPEN_MASK 0666
 
-static bool s_block_children = false;
-
 /// Called in a forked child.
 static void exec_write_and_exit(int fd, const char *buff, size_t count, int status) {
     if (write_loop(fd, buff, count) == -1) {
@@ -399,13 +397,11 @@ void internal_exec(job_t *j, const io_chain_t &&all_ios) {
 
 /// Execute an internal builtin. Given a parser, a job within that parser, and a process within that
 /// job corresponding to a builtin, execute the builtin with the given streams. If pipe_read is set,
-/// assign stdin to it; otherwise infer stdin from the IO chain. unblock_previous is a hack used to
-/// prevent jobs from finishing; see commit cdb72b7024.
+/// assign stdin to it; otherwise infer stdin from the IO chain.
 /// return true on success, false if there is an exec error.
 static bool exec_internal_builtin_proc(parser_t &parser, job_t *j, process_t *p,
                                        const io_pipe_t *pipe_read, const io_chain_t &proc_io_chain,
-                                       io_streams_t &streams,
-                                       const std::function<void(void)> &unblock_previous) {
+                                       io_streams_t &streams) {
     assert(p->type == INTERNAL_BUILTIN && "Process must be a builtin");
     int local_builtin_stdin = STDIN_FILENO;
     bool close_stdin = false;
@@ -493,10 +489,6 @@ static bool exec_internal_builtin_proc(parser_t &parser, job_t *j, process_t *p,
     // make exec handle things.
     const int fg = j->get_flag(JOB_FOREGROUND);
     j->set_flag(JOB_FOREGROUND, false);
-
-    // Main loop may need to perform a blocking read from previous command's output.
-    // Make sure read source is not blocked.
-    unblock_previous();
     p->status = builtin_run(parser, p->get_argv(), streams);
 
     // Restore the fg flag, which is temporarily set to false during builtin
@@ -616,9 +608,6 @@ void exec_job(parser_t &parser, job_t *j) {
     // We are careful to set these to -1 when closed, so if we exit the loop abruptly, we can still
     // close them.
     bool pgrp_set = false;
-    // This is static since processes can block on input/output across jobs the main exec_job loop
-    // is only ever run in a single thread, so this is OK.
-    static pid_t blocked_pid = -1;
     int pipe_current_read = -1, pipe_current_write = -1, pipe_next_read = -1;
     for (std::unique_ptr<process_t> &unique_p : j->processes) {
         if (exec_error) {
@@ -639,7 +628,6 @@ void exec_job(parser_t &parser, job_t *j) {
         // Set to true if we end up forking for this process.
         bool child_forked = false;
         bool child_spawned = false;
-        bool block_child = true;
 
         // The pipes the current process write to and read from. Unfortunately these can't be just
         // allocated on the stack, since j->io wants shared_ptr.
@@ -745,23 +733,10 @@ void exec_job(parser_t &parser, job_t *j) {
         // a pipeline.
         shared_ptr<io_buffer_t> block_output_io_buffer;
 
-        // Used to SIGCONT the previously SIGSTOP'd process so the main loop or next command in job
-        // can read from its output.
-        auto unblock_previous = [&j]() {
-            // We've already called waitpid after forking the child, so we've guaranteed that
-            // they're already SIGSTOP'd. Otherwise we'd be risking a deadlock because we can call
-            // SIGCONT before they've actually stopped, and they'll remain suspended indefinitely.
-            if (blocked_pid != -1) {
-                debug(2, L"Unblocking process %d.\n", blocked_pid);
-                if (s_block_children) kill(blocked_pid, SIGCONT);
-                blocked_pid = -1;
-            }
-        };
-
         // This is the io_streams we pass to internal builtins.
         std::unique_ptr<io_streams_t> builtin_io_streams(new io_streams_t(stdout_read_limit));
 
-        auto do_fork = [&j, &p, &pid, &exec_error, &process_net_io_chain, &block_child,
+        auto do_fork = [&j, &p, &pid, &exec_error, &process_net_io_chain,
                         &child_forked](bool drain_threads, const char *fork_type,
                                        std::function<void()> child_action) -> bool {
             pid = execute_fork(drain_threads);
@@ -769,16 +744,7 @@ void exec_job(parser_t &parser, job_t *j) {
                 // This is the child process. Setup redirections, print correct output to
                 // stdout and stderr, and then exit.
                 p->pid = getpid();
-                blocked_pid = -1;
                 child_set_group(j, p);
-                // Make child processes pause after executing setup_child_process() to give
-                // down-chain commands in the job a chance to join their process group and read
-                // their pipes. The process will be resumed when the next command in the chain is
-                // started.
-                if (block_child && s_block_children) {
-                    kill(p->pid, SIGSTOP);
-                }
-                // The parent will wake us up when we're ready to execute.
                 setup_child_process(p, process_net_io_chain);
                 child_action();
                 DIE("Child process returned control to do_fork lambda!");
@@ -794,10 +760,8 @@ void exec_job(parser_t &parser, job_t *j) {
                 debug(2, L"Fork #%d, pid %d: %s for '%ls'", g_fork_count, pid, fork_type,
                       p->argv0());
                 child_forked = true;
-                if (block_child) {
-                    debug(2, L"Blocking process %d waiting for next command in chain.\n", pid);
-                }
                 p->pid = pid;
+                set_child_group(j, p->pid);
             }
 
             return true;
@@ -867,7 +831,7 @@ void exec_job(parser_t &parser, job_t *j) {
 
             case INTERNAL_BUILTIN: {
                 if (!exec_internal_builtin_proc(parser, j, p, pipe_read.get(), process_net_io_chain,
-                                                *builtin_io_streams, unblock_previous)) {
+                                                *builtin_io_streams)) {
                     exec_error = true;
                 }
                 break;
@@ -953,9 +917,8 @@ void exec_job(parser_t &parser, job_t *j) {
                 bool must_fork = redirection_is_to_real_file(stdout_io.get()) ||
                                  redirection_is_to_real_file(stderr_io.get());
                 if (!must_fork && p->is_last_in_job) {
-                    // We are handling reads directly in the main loop. Make sure source is
-                    // unblocked. Note that we may still end up forking.
-                    unblock_previous();
+                    // We are handling reads directly in the main loop. Note that we may still end
+                    // up forking.
                     const bool stdout_is_to_buffer = stdout_io && stdout_io->io_mode == IO_BUFFER;
                     const bool no_stdout_output = stdout_buffer.empty();
                     const bool no_stderr_output = stderr_buffer.empty();
@@ -1108,8 +1071,9 @@ void exec_job(parser_t &parser, job_t *j) {
                 }
 
                 // This is the parent process. Store away information on the child, and possibly
-                // fice it control over the terminal.
+                // give it control over the terminal.
                 p->pid = pid;
+                set_child_group(j, p->pid);
                 break;
             }
 
@@ -1120,51 +1084,6 @@ void exec_job(parser_t &parser, job_t *j) {
             }
         }
 
-        bool child_blocked = block_child && child_forked;
-        if (child_blocked && s_block_children) {
-            // We have to wait to ensure the child has set their progress group and is in SIGSTOP
-            // state otherwise, we can later call SIGCONT before they call SIGSTOP and they'll be
-            // blocked indefinitely. The child is SIGSTOP'd and is guaranteed to have called
-            // child_set_group() at this point. but we only need to call set_child_group for the
-            // first process in the group. If needs_keepalive is set, this has already been called
-            // for the keepalive process.
-            int result;
-            int pid_status;
-            while ((result = waitpid(p->pid, &pid_status, WUNTRACED)) == -1 && errno == EINTR) {
-                // This could be a superfluous interrupt or Ctrl+C at the terminal In all cases, it
-                // is OK to retry since the forking code above is specifically designed to never,
-                // ever hang/block in a child process before the SIGSTOP call is reached.
-                ;  // do nothing
-            }
-            if (result == -1) {
-                exec_error = true;
-                debug(1, L"waitpid(%d) call in unblock_pid failed:!\n", p->pid);
-                wperror(L"waitpid");
-                break;
-            }
-            // We are not unblocking the child via SIGCONT just yet to give the next process a
-            // chance to open the pipes and join the process group. We only unblock the last process
-            // in the job chain because no one awaits it.
-        }
-        // Regardless of whether the child blocked or not: only once per job, and only once we've
-        // actually executed an external command.
-        if ((child_spawned || child_forked) && !pgrp_set) {
-            // This should be called after waitpid if child_forked and pipes_to_next_command it can
-            // be called at any time if child_spawned.
-            set_child_group(j, p->pid);
-            // we can't rely on p->is_first_in_job because a builtin may have been the first.
-            pgrp_set = true;
-        }
-        // If the command we ran _before_ this one was SIGSTOP'd to let this one catch up, unblock
-        // it now. this must be after the wait_pid on the process we just started, if any.
-        unblock_previous();
-
-        if (child_blocked) {
-            // Store the newly-blocked command's PID so that it can be SIGCONT'd once the next
-            // process in the chain is started. That may be in this job or in another job.
-            blocked_pid = p->pid;
-        }
-
         // Close the pipe the current process uses to read from the previous process_t.
         if (pipe_current_read >= 0) {
             exec_close(pipe_current_read);
@@ -1176,11 +1095,6 @@ void exec_job(parser_t &parser, job_t *j) {
             exec_close(pipe_current_write);
             pipe_current_write = -1;
         }
-
-        // Unblock the last process because there's no need for it to stay SIGSTOP'd for anything.
-        if (p->is_last_in_job) {
-            unblock_previous();
-        }
     }
 
     // Clean up any file descriptors we left open.

@@ -60,15 +60,7 @@ static void debug_safe_int(int level, const char *format, int val) {
 }
 
 /// Called only by the child to set its own process group (possibly creating a new group in the
-/// process if it is the first in a JOB_CONTROL job. The parent will wait for this to finish.
-/// A process that isn't already in control of the terminal can't give itself control of the
-/// terminal without hanging, but it's not right for the child to try and give itself control
-/// from the very beginning because the parent may not have gotten around to doing so yet. Let
-/// the parent figure it out; if the child doesn't have terminal control and it later tries to
-/// read from the terminal, the kernel will send it SIGTTIN and it'll hang anyway.
-/// The key here is that the parent should transfer control of the terminal (if appropriate)
-/// prior to sending the child SIGCONT to wake it up to exec.
-///
+/// process if it is the first in a JOB_CONTROL job.
 /// Returns true on sucess, false on failiure.
 bool child_set_group(job_t *j, process_t *p) {
     bool retval = true;
@@ -100,7 +92,7 @@ bool child_set_group(job_t *j, process_t *p) {
             retval = false;
         }
     } else {
-        // This is probably stays unused in the child.
+        // The child does not actualyl use this field.
        j->pgid = getpgrp();
    }
 