Big fat refactoring of how redirections work. In fish 1.x and 2.0.0, the redirections for a process were flattened into a big list associated with the job, so there was no way to tell which redirections applied to each process. Each process therefore got all the redirections associated with the job. See https://github.com/fish-shell/fish-shell/issues/877 for how this could manifest.

With this change, jobs track only their block-level redirections. Process-level redirections are correctly associated with their process, and at exec time we stitch them together (block, pipe, and process redirections).
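A minimal sketch of that stitching order, using simplified stand-in types rather than fish's real io_data_t/io_chain_t classes:

// Stand-in types for illustration only; fish's real io_chain_t holds shared_ptr<io_data_t>.
#include <string>
#include <vector>

struct redirection_t
{
    std::string description;
};
typedef std::vector<redirection_t> io_chain_t;

// Build the chain for one process: block-level redirections first, then the
// pipes for that process, then its own redirections. Entries later in the
// chain are applied later, so they take precedence over earlier ones.
static io_chain_t build_process_chain(const io_chain_t &block_io,
                                      const io_chain_t &pipe_io,
                                      const io_chain_t &process_io)
{
    io_chain_t result = block_io;
    result.insert(result.end(), pipe_io.begin(), pipe_io.end());
    result.insert(result.end(), process_io.begin(), process_io.end());
    return result;
}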

This fixes the weird issues where redirections bleed across pipelines (like #877), and it also lets us control the order in which redirections are applied, since the final list is constructed right before it is needed. In particular, we can now place pipes after block-level redirections but before process-level redirections, so that a 2>&1-style redirection is picked up after the pipe; this should fix https://github.com/fish-shell/fish-shell/issues/110.
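For example, for `foo 2>&1 | bar`, the child for foo effectively ends up with the fd setup below. This is a hand-written illustration only (the fd name pipe_write_end is hypothetical; the real work happens in setup_child_process), but it shows why applying the pipe before the process's own 2>&1 sends both streams into the pipe:

#include <unistd.h>

// Pipe first, then the process-level 2>&1.
static void apply_child_fds(int pipe_write_end)
{
    dup2(pipe_write_end, STDOUT_FILENO); // pipe: stdout now points at the pipe
    dup2(STDOUT_FILENO, STDERR_FILENO);  // 2>&1: stderr follows stdout into the pipe
    close(pipe_write_end);               // the original pipe fd is no longer needed
}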

This is a significant change. The tests all pass. Cross your fingers.
ridiculousfish 2013-08-19 16:16:41 -07:00
parent f4f2847662
commit 4899086b3c
11 changed files with 182 additions and 116 deletions

exec.cpp (166 lines changed)

@ -167,6 +167,21 @@ static bool redirection_is_to_real_file(const io_data_t *io)
return result;
}
static bool chain_contains_redirection_to_real_file(const io_chain_t &io_chain)
{
bool result = false;
for (size_t idx=0; idx < io_chain.size(); idx++)
{
const shared_ptr<const io_data_t> &io = io_chain.at(idx);
if (redirection_is_to_real_file(io.get()))
{
result = true;
break;
}
}
return result;
}
void print_open_fds(void)
{
for (size_t i=0; i < open_fds.size(); ++i)
@ -539,15 +554,9 @@ static bool can_use_posix_spawn_for_job(const job_t *job, const process_t *proce
/* Now see if we have a redirection involving a file. The only one we allow is /dev/null, which we assume will not fail. */
bool result = true;
const io_chain_t &ios = job->io_chain();
for (size_t idx = 0; idx < ios.size(); idx++)
if (chain_contains_redirection_to_real_file(job->block_io_chain()) || chain_contains_redirection_to_real_file(process->io_chain()))
{
const shared_ptr<const io_data_t> &io = ios.at(idx);
if (redirection_is_to_real_file(io.get()))
{
result = false;
break;
}
result = false;
}
return result;
}
@ -584,13 +593,11 @@ static void exec_no_exec(parser_t &parser, const job_t *job)
}
}
void exec(parser_t &parser, job_t *j)
void exec_job(parser_t &parser, job_t *j)
{
pid_t pid = 0;
sigset_t chldset;
shared_ptr<io_buffer_t> io_buffer;
/*
Set to true if something goes wrong while exec:ing the job, in
which case the cleanup code will kick in.
@ -615,34 +622,35 @@ void exec(parser_t &parser, job_t *j)
debug(4, L"Exec job '%ls' with id %d", j->command_wcstr(), j->job_id);
if (! parser.block_io.empty())
{
j->io.insert(j->io.begin(), parser.block_io.begin(), parser.block_io.end());
}
/* PCA Here we detect the special case of an input buffer redirection, i.e. we want a process to receive data that we hold in a buffer (it is an INPUT for the process, but an output for fish). This is extremely rare: I believe only run_pager creates these and it would be nice to dump it. So we can only have at most one.
const io_buffer_t *input_redirect = NULL;
const io_chain_t &ios = j->io_chain();
for (size_t idx = 0; idx < ios.size(); idx++)
It would be great to wean fish_pager off of input redirections so that we can dump input redirections and the INTERNAL_BUFFER process type altogether.
*/
const io_buffer_t *single_magic_input_redirect = NULL;
const io_chain_t all_ios = j->all_io_redirections();
for (size_t idx = 0; idx < all_ios.size(); idx++)
{
const shared_ptr<io_data_t> &io = ios.at(idx);
const shared_ptr<io_data_t> &io = all_ios.at(idx);
if ((io->io_mode == IO_BUFFER))
{
CAST_INIT(io_buffer_t *, io_buffer, io.get());
if (io_buffer->is_input)
{
/* We expect to have at most one of these, per the comment above. Note that this assertion is the only reason we don't break out of the loop below */
assert(single_magic_input_redirect == NULL && "Should have at most one input IO_BUFFER");
/*
Input redirection - create a new gobetween process to take
care of buffering, save the redirection in input_redirect
*/
process_t *fake = new process_t();
fake->type = INTERNAL_BUFFER;
fake->pipe_write_fd = 1;
fake->pipe_write_fd = STDOUT_FILENO;
j->first_process->pipe_read_fd = io->fd;
fake->next = j->first_process;
j->first_process = fake;
input_redirect = io_buffer;
break;
single_magic_input_redirect = io_buffer;
}
}
}
@ -658,7 +666,9 @@ void exec(parser_t &parser, job_t *j)
setup_child_process makes sure signals are properly set
up. It will also call signal_unblock
*/
if (!setup_child_process(j, 0))
/* PCA This is for handling exec. Passing all_ios here matches what fish 2.0.0 and 1.x did. It's known to be wrong - for example, it means that redirections bound for subsequent commands in the pipeline will apply to exec. However, using exec in a pipeline doesn't really make sense, so I'm not trying to fix it here. */
if (!setup_child_process(j, 0, all_ios))
{
/*
launch_process _never_ returns
@ -752,6 +762,9 @@ void exec(parser_t &parser, job_t *j)
int pipe_current_read = -1, pipe_current_write = -1, pipe_next_read = -1;
for (process_t *p=j->first_process; p; p = p->next)
{
/* The IO chain for this process. It starts with the block IO, then pipes, and then gets any from the process */
io_chain_t process_net_io_chain = j->block_io_chain();
/* "Consume" any pipe_next_read by making it current */
assert(pipe_current_read == -1);
pipe_current_read = pipe_next_read;
@ -762,7 +775,23 @@ void exec(parser_t &parser, job_t *j)
/* The pipes the current process write to and read from.
Unfortunately these can't be just allocated on the stack, since
j->io wants shared_ptr. */
j->io wants shared_ptr.
The write pipe (destined for stdout) needs to occur before redirections. For example, with a redirection like this:
`foo 2>&1 | bar`, what we want to happen is this:
dup2(pipe, stdout)
dup2(stdout, stderr)
so that stdout and stderr both wind up referencing the pipe.
The read pipe (destined for stdin) is more ambiguous. Imagine a pipeline like this:
echo alpha | cat < beta.txt
Should cat output alpha or beta? bash and ksh output 'beta', tcsh gets it right and complains about ambiguity, and zsh outputs both (!). No shells appear to output 'alpha', so we match bash here. That means putting the pipe first, so that it gets trumped by the file redirection.
*/
shared_ptr<io_pipe_t> pipe_write;
shared_ptr<io_pipe_t> pipe_read;
@ -771,16 +800,19 @@ void exec(parser_t &parser, job_t *j)
pipe_read.reset(new io_pipe_t(p->pipe_read_fd, true));
/* Record the current read in pipe_read */
pipe_read->pipe_fd[0] = pipe_current_read;
j->append_io(pipe_read);
process_net_io_chain.push_back(pipe_read);
}
if (p->next)
{
pipe_write.reset(new io_pipe_t(p->pipe_write_fd, false));
j->append_io(pipe_write);
process_net_io_chain.push_back(pipe_write);
}
/* Now that we've added our pipes, add the rest of the IO redirections associated with that process */
process_net_io_chain.append(p->io_chain());
/*
This call is used so the global environment variable array
is regenerated, if needed, before the fork. That way, we
@ -826,14 +858,12 @@ void exec(parser_t &parser, job_t *j)
//fprintf(stderr, "before IO: ");
//io_print(j->io);
// This is the IO buffer we use for storing the output of a block or function when it is in a pipeline
shared_ptr<io_buffer_t> block_output_io_buffer;
switch (p->type)
{
case INTERNAL_FUNCTION:
{
int shadows;
/*
Calls to function_get_definition might need to
source a file as a part of autoloading, hence there
@ -845,7 +875,7 @@ void exec(parser_t &parser, job_t *j)
bool function_exists = function_get_definition(p->argv0(), &def);
wcstring_list_t named_arguments = function_get_named_arguments(p->argv0());
shadows = function_get_shadows(p->argv0());
bool shadows = function_get_shadows(p->argv0());
signal_block();
@ -871,21 +901,22 @@ void exec(parser_t &parser, job_t *j)
if (p->next)
{
// Be careful to handle failure, e.g. too many open fds
io_buffer.reset(io_buffer_t::create(0));
if (io_buffer.get() == NULL)
block_output_io_buffer.reset(io_buffer_t::create(false /* = not input */, STDOUT_FILENO));
if (block_output_io_buffer.get() == NULL)
{
exec_error = true;
job_mark_process_as_failed(j, p);
}
else
{
j->append_io(io_buffer);
#warning is this bad because we need to be able to read this from select_try?
process_net_io_chain.push_back(block_output_io_buffer);
}
}
if (! exec_error)
{
internal_exec_helper(parser, def.c_str(), TOP, j->io_chain());
internal_exec_helper(parser, def.c_str(), TOP, process_net_io_chain);
}
parser.allow_function();
@ -898,21 +929,22 @@ void exec(parser_t &parser, job_t *j)
{
if (p->next)
{
io_buffer.reset(io_buffer_t::create(0));
if (io_buffer.get() == NULL)
#warning is this bad because we need to be able to read this from select_try?
block_output_io_buffer.reset(io_buffer_t::create(0));
if (block_output_io_buffer.get() == NULL)
{
exec_error = true;
job_mark_process_as_failed(j, p);
}
else
{
j->io.push_back(io_buffer);
process_net_io_chain.push_back(block_output_io_buffer);
}
}
if (! exec_error)
{
internal_exec_helper(parser, p->argv0(), TOP, j->io);
internal_exec_helper(parser, p->argv0(), TOP, process_net_io_chain);
}
break;
@ -930,7 +962,7 @@ void exec(parser_t &parser, job_t *j)
*/
if (p == j->first_process)
{
const shared_ptr<const io_data_t> in = io_chain_get(j->io, 0);
const shared_ptr<const io_data_t> in = process_net_io_chain.get_io_for_fd(STDIN_FILENO);
if (in)
{
@ -1029,15 +1061,15 @@ void exec(parser_t &parser, job_t *j)
builtin_push_io(parser, builtin_stdin);
builtin_out_redirect = has_fd(j->io, 1);
builtin_err_redirect = has_fd(j->io, 2);
builtin_out_redirect = has_fd(process_net_io_chain, STDOUT_FILENO);
builtin_err_redirect = has_fd(process_net_io_chain, STDERR_FILENO);
const int fg = job_get_flag(j, JOB_FOREGROUND);
job_set_flag(j, JOB_FOREGROUND, 0);
signal_unblock();
p->status = builtin_run(parser, p->get_argv(), j->io);
p->status = builtin_run(parser, p->get_argv(), process_net_io_chain);
builtin_out_redirect=old_out;
builtin_err_redirect=old_err;
@ -1083,7 +1115,7 @@ void exec(parser_t &parser, job_t *j)
to buffer such io, since otherwise the internal pipe
buffer might overflow.
*/
if (!io_buffer)
if (! block_output_io_buffer.get())
{
/*
No buffer, so we exit directly. This means we
@ -1097,14 +1129,16 @@ void exec(parser_t &parser, job_t *j)
break;
}
io_remove(j->io, io_buffer);
// Here we must have a non-NULL block_output_io_buffer
assert(block_output_io_buffer.get() != NULL);
io_remove(process_net_io_chain, block_output_io_buffer);
io_buffer->read();
block_output_io_buffer->read();
const char *buffer = io_buffer->out_buffer_ptr();
size_t count = io_buffer->out_buffer_size();
const char *buffer = block_output_io_buffer->out_buffer_ptr();
size_t count = block_output_io_buffer->out_buffer_size();
if (io_buffer->out_buffer_size() > 0)
if (block_output_io_buffer->out_buffer_size() > 0)
{
/* We don't have to drain threads here because our child process is simple */
if (g_log_forks)
@ -1119,9 +1153,9 @@ void exec(parser_t &parser, job_t *j)
This is the child process. Write out the contents of the pipeline.
*/
p->pid = getpid();
setup_child_process(j, p);
setup_child_process(j, p, process_net_io_chain);
exec_write_and_exit(io_buffer->fd, buffer, count, status);
exec_write_and_exit(block_output_io_buffer->fd, buffer, count, status);
}
else
{
@ -1145,7 +1179,7 @@ void exec(parser_t &parser, job_t *j)
p->completed = 1;
}
io_buffer.reset();
block_output_io_buffer.reset();
break;
}
@ -1153,9 +1187,9 @@ void exec(parser_t &parser, job_t *j)
case INTERNAL_BUFFER:
{
const char *buffer = input_redirect->out_buffer_ptr();
size_t count = input_redirect->out_buffer_size();
assert(single_magic_input_redirect != NULL);
const char *buffer = single_magic_input_redirect->out_buffer_ptr();
size_t count = single_magic_input_redirect->out_buffer_size();
/* We don't have to drain threads here because our child process is simple */
if (g_log_forks)
@ -1170,7 +1204,7 @@ void exec(parser_t &parser, job_t *j)
contents of the pipeline.
*/
p->pid = getpid();
setup_child_process(j, p);
setup_child_process(j, p, process_net_io_chain);
exec_write_and_exit(1, buffer, count, 0);
}
@ -1201,8 +1235,8 @@ void exec(parser_t &parser, job_t *j)
bool fork_was_skipped = false;
const shared_ptr<io_data_t> stdout_io = io_chain_get(j->io, STDOUT_FILENO);
const shared_ptr<io_data_t> stderr_io = io_chain_get(j->io, STDERR_FILENO);
const shared_ptr<io_data_t> stdout_io = process_net_io_chain.get_io_for_fd(STDOUT_FILENO);
const shared_ptr<io_data_t> stderr_io = process_net_io_chain.get_io_for_fd(STDERR_FILENO);
/* If we are outputting to a file, we have to actually do it, even if we have no output, so that we can truncate the file. Does not apply to /dev/null. */
bool must_fork = redirection_is_to_real_file(stdout_io.get()) || redirection_is_to_real_file(stderr_io.get());
@ -1292,7 +1326,7 @@ void exec(parser_t &parser, job_t *j)
if (g_log_forks)
{
printf("fork #%d: Executing fork for internal builtin for '%ls'\n", g_fork_count, p->argv0());
io_print(j->io);
io_print(process_net_io_chain);
}
pid = execute_fork(false);
if (pid == 0)
@ -1303,7 +1337,7 @@ void exec(parser_t &parser, job_t *j)
then exit.
*/
p->pid = getpid();
setup_child_process(j, p);
setup_child_process(j, p, process_net_io_chain);
do_builtin_io(outbuff, outbuff_len, errbuff, errbuff_len);
exit_without_destructors(p->status);
}
@ -1346,7 +1380,7 @@ void exec(parser_t &parser, job_t *j)
printf("fork #%d: forking for '%s' in '%ls:%ls'\n", g_fork_count, actual_cmd, file ? file : L"", func ? func : L"?");
fprintf(stderr, "IO chain for %s:\n", actual_cmd);
io_print(j->io);
io_print(process_net_io_chain);
}
#if FISH_USE_POSIX_SPAWN
@ -1357,7 +1391,7 @@ void exec(parser_t &parser, job_t *j)
/* Create posix spawn attributes and actions */
posix_spawnattr_t attr = posix_spawnattr_t();
posix_spawn_file_actions_t actions = posix_spawn_file_actions_t();
bool made_it = fork_actions_make_spawn_properties(&attr, &actions, j, p);
bool made_it = fork_actions_make_spawn_properties(&attr, &actions, j, p, process_net_io_chain);
if (made_it)
{
/* We successfully made the attributes and actions; actually call posix_spawn */
@ -1393,7 +1427,7 @@ void exec(parser_t &parser, job_t *j)
{
/* This is the child process. */
p->pid = getpid();
setup_child_process(j, p);
setup_child_process(j, p, process_net_io_chain);
safe_launch_process(p, actual_cmd, argv, envv);
/*
@ -1441,12 +1475,6 @@ void exec(parser_t &parser, job_t *j)
exec_close(pipe_current_write);
pipe_current_write = -1;
}
if (pipe_write.get())
j->io.remove(pipe_write);
if (pipe_read.get())
j->io.remove(pipe_read);
}
/* Clean up any file descriptors we left open */

exec.h (2 lines changed)

@ -42,7 +42,7 @@
*/
class parser_t;
void exec(parser_t &parser, job_t *j);
void exec_job(parser_t &parser, job_t *j);
/**
Evaluate the expression cmd in a subshell, add the outputs into the

io.cpp (7 lines changed)

@ -128,7 +128,7 @@ io_buffer_t *io_buffer_t::create(bool is_input, int fd)
bool success = true;
if (fd == -1)
{
fd = is_input ? 0 : 1;
fd = is_input ? STDIN_FILENO : STDOUT_FILENO;
}
io_buffer_t *buffer_redirect = new io_buffer_t(fd, is_input);
@ -208,6 +208,11 @@ void io_chain_t::push_front(const shared_ptr<io_data_t> &element)
this->insert(this->begin(), element);
}
void io_chain_t::append(const io_chain_t &chain)
{
this->insert(this->end(), chain.begin(), chain.end());
}
void io_remove(io_chain_t &list, const shared_ptr<const io_data_t> &element)
{
list.remove(element);

io.h (1 line changed)

@ -189,6 +189,7 @@ public:
void remove(const shared_ptr<const io_data_t> &element);
void push_back(const shared_ptr<io_data_t> &element);
void push_front(const shared_ptr<io_data_t> &element);
void append(const io_chain_t &chain);
shared_ptr<const io_data_t> get_io_for_fd(int fd) const;
shared_ptr<io_data_t> get_io_for_fd(int fd);

parser.cpp

@ -1168,9 +1168,9 @@ int parser_t::is_help(const wchar_t *s, int min_match) const
(len >= (size_t)min_match && (wcsncmp(L"--help", s, len) == 0));
}
job_t *parser_t::job_create(void)
job_t *parser_t::job_create()
{
job_t *res = new job_t(acquire_job_id());
job_t *res = new job_t(acquire_job_id(), this->block_io);
this->my_job_list.push_front(res);
job_set_flag(res,
@ -1256,6 +1256,9 @@ void parser_t::parse_job_argument_list(process_t *p,
wcstring unmatched;
int unmatched_pos=0;
/* The set of IO redirections that we construct for the process */
io_chain_t process_io_chain;
/*
Test if this is the 'count' command. We need to special case
count in the shell, since it should display a help message on
@ -1559,7 +1562,7 @@ void parser_t::parse_job_argument_list(process_t *p,
if (new_io.get() != NULL)
{
j->append_io(new_io);
process_io_chain.push_back(new_io);
}
}
@ -1613,7 +1616,9 @@ void parser_t::parse_job_argument_list(process_t *p,
}
}
return;
/* Store our IO chain. The existing chain should be empty. */
assert(p->io_chain().empty());
p->set_io_chain(process_io_chain);
}
/*
@ -2256,7 +2261,7 @@ void parser_t::skipped_exec(job_t * j)
{
if (!current_block->outer->skip)
{
exec(*this, j);
exec_job(*this, j);
return;
}
parser_t::pop_block();
@ -2269,7 +2274,7 @@ void parser_t::skipped_exec(job_t * j)
const if_block_t *ib = static_cast<const if_block_t*>(current_block);
if (ib->if_expr_evaluated && ! ib->any_branch_taken)
{
exec(*this, j);
exec_job(*this, j);
return;
}
}
@ -2278,7 +2283,7 @@ void parser_t::skipped_exec(job_t * j)
{
if (current_block->type() == SWITCH)
{
exec(*this, j);
exec_job(*this, j);
return;
}
}
@ -2415,7 +2420,7 @@ void parser_t::eval_job(tokenizer_t *tok)
if (j->first_process->type==INTERNAL_BUILTIN && !j->first_process->next)
was_builtin = 1;
scoped_push<int> tokenizer_pos_push(&current_tokenizer_pos, job_begin_pos);
exec(*this, j);
exec_job(*this, j);
/* Only external commands require a new fishd barrier */
if (!was_builtin)

parser.h

@ -352,6 +352,9 @@ private:
void print_errors(wcstring &target, const wchar_t *prefix);
void print_errors_stderr();
/** Create a job */
job_t *job_create();
public:
std::vector<profile_item_t*> profile_items;
@ -457,9 +460,6 @@ public:
/** Return a description of the given blocktype */
const wchar_t *get_block_desc(int block) const;
/** Create a job */
job_t *job_create();
/** Removes a job */
bool job_remove(job_t *job);

postfork.cpp

@ -292,7 +292,7 @@ static int handle_child_io(const io_chain_t &io_chain)
}
int setup_child_process(job_t *j, process_t *p)
int setup_child_process(job_t *j, process_t *p, const io_chain_t &io_chain)
{
bool ok=true;
@ -303,7 +303,7 @@ int setup_child_process(job_t *j, process_t *p)
if (ok)
{
ok = (0 == handle_child_io(j->io_chain()));
ok = (0 == handle_child_io(io_chain));
if (p != 0 && ! ok)
{
exit_without_destructors(1);
@ -379,7 +379,7 @@ pid_t execute_fork(bool wait_for_threads_to_die)
}
#if FISH_USE_POSIX_SPAWN
bool fork_actions_make_spawn_properties(posix_spawnattr_t *attr, posix_spawn_file_actions_t *actions, job_t *j, process_t *p)
bool fork_actions_make_spawn_properties(posix_spawnattr_t *attr, posix_spawn_file_actions_t *actions, job_t *j, process_t *p, const io_chain_t &io_chain)
{
/* Initialize the output */
if (posix_spawnattr_init(attr) != 0)
@ -444,19 +444,19 @@ bool fork_actions_make_spawn_properties(posix_spawnattr_t *attr, posix_spawn_fil
err = posix_spawnattr_setsigmask(attr, &sigmask);
/* Make sure that our pipes don't use an fd that the redirection itself wants to use */
free_redirected_fds_from_pipes(j->io_chain());
free_redirected_fds_from_pipes(io_chain);
/* Close unused internal pipes */
std::vector<int> files_to_close;
get_unused_internal_pipes(files_to_close, j->io_chain());
get_unused_internal_pipes(files_to_close, io_chain);
for (size_t i = 0; ! err && i < files_to_close.size(); i++)
{
err = posix_spawn_file_actions_addclose(actions, files_to_close.at(i));
}
for (size_t idx = 0; idx < j->io_chain().size(); idx++)
for (size_t idx = 0; idx < io_chain.size(); idx++)
{
shared_ptr<const io_data_t> io = j->io_chain().at(idx);
const shared_ptr<const io_data_t> io = io_chain.at(idx);
if (io->io_mode == IO_FD)
{

postfork.h

@ -53,13 +53,13 @@ int set_child_group(job_t *j, process_t *p, int print_errors);
\param j the job to set up the IO for
\param p the child process to set up
\param io_chain the IO chain to use (ignores the job's iochain)
\param io_chain the IO chain to use
\return 0 on sucess, -1 on failiure. When this function returns,
signals are always unblocked. On failiure, signal handlers, io
redirections and process group of the process is undefined.
*/
int setup_child_process(job_t *j, process_t *p);
int setup_child_process(job_t *j, process_t *p, const io_chain_t &io_chain);
/* Call fork(), optionally waiting until we are no longer multithreaded. If the forked child doesn't do anything that could allocate memory, take a lock, etc. (like call exec), then it's not necessary to wait for threads to die. If the forked child may do those things, it should wait for threads to die.
*/
@ -73,7 +73,7 @@ void safe_report_exec_error(int err, const char *actual_cmd, const char * const
#if FISH_USE_POSIX_SPAWN
/* Initializes and fills in a posix_spawnattr_t; on success, the caller should destroy it via posix_spawnattr_destroy */
bool fork_actions_make_spawn_properties(posix_spawnattr_t *attr, posix_spawn_file_actions_t *actions, job_t *j, process_t *p);
bool fork_actions_make_spawn_properties(posix_spawnattr_t *attr, posix_spawn_file_actions_t *actions, job_t *j, process_t *p, const io_chain_t &io_chain);
#endif
#endif

proc.cpp

@ -537,10 +537,10 @@ process_t::~process_t()
delete this->next;
}
job_t::job_t(job_id_t jobid) :
job_t::job_t(job_id_t jobid, const io_chain_t &bio) :
command_str(),
command_narrow(),
io(),
block_io(bio),
first_process(NULL),
pgid(0),
tmodes(),
@ -556,6 +556,17 @@ job_t::~job_t()
release_job_id(job_id);
}
/* Return all the IO redirections. Start with the block IO, then walk over the processes */
io_chain_t job_t::all_io_redirections() const
{
io_chain_t result = this->block_io;
for (process_t *p = this->first_process; p != NULL; p = p->next)
{
result.append(p->io_chain());
}
return result;
}
/* This is called from a signal handler */
void job_handle_signal(int signal, siginfo_t *info, void *con)
{
@ -874,9 +885,10 @@ static int select_try(job_t *j)
FD_ZERO(&fds);
for (size_t idx = 0; idx < j->io_chain().size(); idx++)
const io_chain_t chain = j->all_io_redirections();
for (size_t idx = 0; idx < chain.size(); idx++)
{
const io_data_t *io = j->io_chain().at(idx).get();
const io_data_t *io = chain.at(idx).get();
if (io->io_mode == IO_BUFFER)
{
CAST_INIT(const io_pipe_t *, io_pipe, io);
@ -915,9 +927,10 @@ static void read_try(job_t *j)
/*
Find the last buffer, which is the one we want to read from
*/
for (size_t idx = 0; idx < j->io_chain().size(); idx++)
const io_chain_t chain = j->all_io_redirections();
for (size_t idx = 0; idx < chain.size(); idx++)
{
io_data_t *d = j->io_chain().at(idx).get();
io_data_t *d = chain.at(idx).get();
if (d->io_mode == IO_BUFFER)
{
buff = static_cast<io_buffer_t *>(d);

proc.h (26 lines changed)

@ -134,6 +134,8 @@ private:
/* narrow copy of argv0 so we don't have to convert after fork */
narrow_string_rep_t argv0_narrow;
io_chain_t process_io_chain;
/* No copying */
process_t(const process_t &rhs);
void operator=(const process_t &rhs);
@ -190,6 +192,17 @@ public:
return argv0_narrow.get();
}
/* IO chain getter and setter */
const io_chain_t &io_chain() const
{
return process_io_chain;
}
void set_io_chain(const io_chain_t &chain)
{
this->process_io_chain = chain;
}
/** actual command to pass to exec in case of EXTERNAL or INTERNAL_EXEC. */
wcstring actual_cmd;
@ -285,9 +298,8 @@ class job_t
/* narrow copy so we don't have to convert after fork */
narrow_string_rep_t command_narrow;
/** List of all IO redirections for this job. */
io_chain_t io;
friend void exec(parser_t &parser, job_t *j);
/* The IO chain associated with the block */
const io_chain_t block_io;
/* No copying */
job_t(const job_t &rhs);
@ -295,7 +307,7 @@ class job_t
public:
job_t(job_id_t jobid);
job_t(job_id_t jobid, const io_chain_t &bio);
~job_t();
/** Returns whether the command is empty. */
@ -360,9 +372,11 @@ public:
*/
unsigned int flags;
const io_chain_t &io_chain() const { return this->io; }
/* Returns the block IO redirections associated with the job. These are things like the IO redirections associated with the begin...end statement. */
const io_chain_t &block_io_chain() const { return this->block_io; }
void append_io(const shared_ptr<io_data_t> & new_io) { this->io.push_back(new_io); }
/* Fetch all the IO redirections associated with the job */
io_chain_t all_io_redirections() const;
};
/**

reader.cpp

@ -1274,11 +1274,11 @@ static void run_pager(const wcstring &prefix, int is_quoted, const std::vector<c
wcstring prefix_esc;
char *foo;
shared_ptr<io_buffer_t> in(io_buffer_t::create(true, 3));
shared_ptr<io_buffer_t> out(io_buffer_t::create(false, 4));
shared_ptr<io_buffer_t> in_buff(io_buffer_t::create(true, 3));
shared_ptr<io_buffer_t> out_buff(io_buffer_t::create(false, 4));
// The above may fail e.g. if we have too many open fds
if (in.get() == NULL || out.get() == NULL)
if (in_buff.get() == NULL || out_buff.get() == NULL)
return;
wchar_t *escaped_separator;
@ -1350,26 +1350,26 @@ static void run_pager(const wcstring &prefix, int is_quoted, const std::vector<c
free(escaped_separator);
foo = wcs2str(msg.c_str());
in->out_buffer_append(foo, strlen(foo));
in_buff->out_buffer_append(foo, strlen(foo));
free(foo);
term_donate();
parser_t &parser = parser_t::principal_parser();
io_chain_t io_chain;
io_chain.push_back(out);
io_chain.push_back(in);
io_chain.push_back(out_buff);
io_chain.push_back(in_buff);
parser.eval(cmd, io_chain, TOP);
term_steal();
out->read();
out_buff->read();
int nil=0;
out->out_buffer_append((char *)&nil, 1);
const char zero = 0;
out_buff->out_buffer_append(&zero, 1);
const char *outbuff = out->out_buffer_ptr();
if (outbuff)
const char *out_data = out_buff->out_buffer_ptr();
if (out_data)
{
const wcstring str = str2wcstring(outbuff);
const wcstring str = str2wcstring(out_data);
size_t idx = str.size();
while (idx--)
{