Recover from bad redirections in the middle of a job pipeline
Currently fish aborts execution mid-pipeline if a file redirection fails, which can leave the shell in a broken state (the job is abandoned after terminal control has already been handed to an already-started process in the pipeline).

This patch replaces a failed fd with a closed fd and continues execution if the affected process wasn't the first in the pipeline.

While this is a hack to address the regression behind fish-shell/#7038 introduced in d62576c, it can also be argued that this behavior is actually more correct... right?

Closes #7038.
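The idea in miniature: when opening a redirection target fails, record a "close this fd in the child" action instead of abandoning the pipeline. The following is a minimal standalone sketch of that fallback, not fish's actual code; redir_action_t and append_file_redirection are illustrative names only.

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <string>
#include <vector>

struct redir_action_t {
    int fd;      // fd the child should see redirected (e.g. STDOUT_FILENO)
    int source;  // fd to dup2() onto `fd`, or -1 meaning "just close `fd`"
};

// Try to open `path` as the target for a redirection on `fd`. On failure,
// degrade to a close action and report the error, but keep the action list
// usable so the caller can continue executing the rest of the pipeline.
static bool append_file_redirection(std::vector<redir_action_t> &actions, int fd,
                                    const std::string &path) {
    int file = open(path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (file < 0) {
        std::perror("open");
        actions.push_back({fd, -1});  // the child will simply close `fd`
        return false;
    }
    actions.push_back({fd, file});
    return true;
}

int main() {
    std::vector<redir_action_t> actions;
    // Opening "/" for writing fails, yet the action list remains usable:
    // fd 1 will just be closed in the child instead of the job aborting.
    bool ok = append_file_redirection(actions, STDOUT_FILENO, "/");
    std::fprintf(stderr, "redirection %s, %zu action(s) recorded\n",
                 ok ? "succeeded" : "failed", actions.size());
    return 0;
}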
commit bc756a981e (parent 4785440f65)
src/exec.cpp (18 lines changed)

@@ -745,8 +745,8 @@ static bool exec_block_or_func_process(parser_t &parser, const std::shared_ptr<j
     return true;
 }
 
-/// Executes a process \p in job \j, using the pipes \p pipes (which may have invalid fds if this is
-/// the first or last process).
+/// Executes a process \p p in \p job, using the pipes \p pipes (which may have invalid fds if this
+/// is the first or last process).
 /// \p deferred_pipes represents the pipes from our deferred process; if set ensure they get closed
 /// in any child. If \p is_deferred_run is true, then this is a deferred run; this affects how
 /// certain buffering works. \returns true on success, false on exec error.
@@ -800,8 +800,18 @@ static bool exec_process_in_job(parser_t &parser, process_t *p, const std::share
     // This may fail.
     if (!process_net_io_chain.append_from_specs(p->redirection_specs(),
                                                 parser.vars().get_pwd_slash())) {
-        // Error.
-        return false;
+        // If the redirection to a file failed, append_from_specs closes the corresponding handle
+        // allowing us to recover from failure mid-way through execution of a piped job to e.g.
+        // recover from loss of terminal control, etc.
+
+        // It is unsafe to abort execution in the middle of a multi-command job as we might end up
+        // trying to resume execution without control of the terminal.
+        if (p->is_first_in_job) {
+            // It's OK to abort here
+            return false;
+        }
+        // Otherwise, just rely on the closed fd to take care of things. It's also probably more
+        // semantically correct!
     }
 
     // Read pipe goes last.
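To see why only the first process in the job may safely abort, here is a hedged illustration (not fish's code) of the underlying POSIX job-control mechanics: once the shell hands the terminal to the job's process group with tcsetpgrp(), returning to the prompt without finishing the job, or without explicitly reclaiming the terminal, leaves the shell as a background process group on its own terminal.

#include <sys/wait.h>
#include <unistd.h>

#include <csignal>
#include <cstdio>

int main() {
    pid_t child = fork();
    if (child == 0) {
        setpgid(0, 0);  // the child becomes the job's process group leader
        execlp("echo", "echo", "hello from the foreground job", (char *)nullptr);
        _exit(127);
    }
    setpgid(child, child);           // also set it in the parent to avoid a race
    signal(SIGTTOU, SIG_IGN);        // tcsetpgrp() from the background raises SIGTTOU
    tcsetpgrp(STDIN_FILENO, child);  // hand the terminal to the job

    // Bailing out here, before the job finishes and before the next two lines
    // run, would leave this "shell" as a background process group without the
    // terminal -- the broken state the comment in the diff is guarding against.
    waitpid(child, nullptr, 0);
    tcsetpgrp(STDIN_FILENO, getpgrp());  // take the terminal back
    std::puts("shell regained the terminal");
    return 0;
}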
src/io.cpp

@@ -253,6 +253,10 @@ bool io_chain_t::append_from_specs(const redirection_spec_list_t &specs, const w
                 FLOGF(warning, FILE_ERROR, spec.target.c_str());
                 if (should_flog(warning)) wperror(L"open");
             }
+            // If opening a file fails, insert a closed FD instead of the file redirection
+            // and return false. This lets execution potentially recover and at least gives
+            // the shell a chance to gracefully regain control of the shell (see #7038).
+            this->push_back(make_unique<io_close_t>(spec.fd));
             return false;
         }
         this->push_back(std::make_shared<io_file_t>(spec.fd, std::move(file)));
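The practical effect of substituting a closed fd is that the affected process still runs; its writes to the missing redirection simply fail with EBADF instead of the whole job being abandoned. A small sketch of that degradation, using ordinary POSIX calls rather than fish code:

#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
    int fds[2];
    if (pipe(fds) != 0) return 1;

    // Pretend the file redirection destined for fds[1] failed and was replaced
    // by a close action: the descriptor is simply closed before use.
    close(fds[1]);

    ssize_t n = write(fds[1], "data\n", 5);
    if (n < 0) {
        // The write fails cleanly with EBADF; the process itself keeps running.
        std::fprintf(stderr, "write failed: %s\n", std::strerror(errno));
    }
    std::puts("process continued past the failed redirection");
    close(fds[0]);
    return 0;
}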
src/io.h (4 lines changed)
@@ -216,7 +216,9 @@ class io_file_t : public io_data_t {
 
     io_file_t(int fd, autoclose_fd_t file)
         : io_data_t(io_mode_t::file, fd, file.fd()), file_fd_(std::move(file)) {
-        assert(file_fd_.valid() && "File is not valid");
+        // Invalid file redirections are replaced with a closed fd, so the following
+        // assertion isn't guaranteed to pass:
+        // assert(file_fd_.valid() && "File is not valid");
     }
 
     ~io_file_t() override;