Clean up job_or_process_extent

This had a bad merge which happened to work, plus some other nonsense.
ridiculousfish 2019-10-18 15:24:28 -07:00
parent 2fed311d4c
commit 9652b3e11b


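The "bad merge which happened to work" is visible in the second hunk below: a brace-less for loop over the tokenizer immediately followed by a while loop over the same tokenizer, so the while becomes the for's body. The stand-alone sketch below (using a fake token stream, not fish's tokenizer_t) shows why that shape compiles and why it could go unnoticed: the for's init consumes only the first token, and the first token of a command is ordinarily a plain word rather than one of the separators the loop acts on, so skipping it plausibly made no visible difference.

    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <vector>

    // Stand-in token stream for illustration only; not fish's tokenizer_t.
    struct fake_token_stream {
        std::vector<int> toks{1, 2, 3, 4};
        std::size_t i = 0;
        std::optional<int> next() {
            if (i >= toks.size()) return std::nullopt;
            return toks[i++];
        }
    };

    int main() {
        fake_token_stream tok;
        bool finished = false;
        // Both loop headers, as in the pre-cleanup code: the brace-less for makes
        // the while its body. The for's init consumes token 1, the while consumes
        // tokens 2..4, and the for's increment performs one final next() that
        // returns nothing and ends the outer loop.
        for (std::optional<int> token = tok.next(); token && !finished; token = tok.next())
            while ((token = tok.next()) && !finished) {
                std::cout << "processing token " << *token << "\n";  // token 1 is never seen
            }
        return 0;
    }
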
@@ -289,14 +289,13 @@ void parse_util_cmdsubst_extent(const wchar_t *buff, size_t cursor_pos, const wc
 /// Get the beginning and end of the job or process definition under the cursor.
 static void job_or_process_extent(const wchar_t *buff, size_t cursor_pos, const wchar_t **a,
-                                  const wchar_t **b, int process) {
+                                  const wchar_t **b, bool process) {
     assert(buff && "Null buffer");
-    const wchar_t *begin, *end;
-    wchar_t *buffcpy;
+    const wchar_t *begin = nullptr, *end = nullptr;
     int finished = 0;
-    if (a) *a = 0;
-    if (b) *b = 0;
+    if (a) *a = nullptr;
+    if (b) *b = nullptr;
     parse_util_cmdsubst_extent(buff, cursor_pos, &begin, &end);
     if (!end || !begin) {
         return;
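
The hunk above drops the raw wchar_t *buffcpy declaration; the hunk below removes the wcsndup()/free() pair that filled it and instead copies the slice into a wcstring (fish's std::wstring typedef) built from the [begin, end) range. Here is a minimal sketch of the two ownership styles, written against plain std::wstring so it stands alone; std::malloc plus std::wmemcpy stands in for fish's wcsndup helper.

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <cwchar>
    #include <string>

    int main() {
        const wchar_t *buff = L"echo hello | grep h";
        const wchar_t *begin = buff;                    // start of the slice of interest
        const wchar_t *end = buff + std::wcslen(buff);  // one past its end
        const std::size_t len = static_cast<std::size_t>(end - begin);

        // Old style: duplicate the slice into a malloc'd buffer that every exit
        // path must remember to free.
        wchar_t *copy = static_cast<wchar_t *>(std::malloc((len + 1) * sizeof(wchar_t)));
        assert(copy != nullptr);
        std::wmemcpy(copy, begin, len);
        copy[len] = L'\0';
        // ... tokenize copy ...
        std::free(copy);

        // New style, as in this commit: an owning string constructed from the
        // pointer range. No manual free, and nothing leaks on an early return.
        const std::wstring buffcpy(begin, end);
        assert(buffcpy.size() == len);
        return 0;
    }
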
@@ -307,49 +306,46 @@ static void job_or_process_extent(const wchar_t *buff, size_t cursor_pos, const
     if (a) *a = begin;
     if (b) *b = end;
-    buffcpy = wcsndup(begin, end - begin);
-    assert(buffcpy != NULL);
-    tokenizer_t tok(buffcpy, TOK_ACCEPT_UNFINISHED);
-    for (maybe_t<tok_t> token = tok.next(); token && !finished; token = tok.next())
-        while ((token = tok.next()) && !finished) {
-            size_t tok_begin = token->offset;
+    const wcstring buffcpy(begin, end);
+    tokenizer_t tok(buffcpy.c_str(), TOK_ACCEPT_UNFINISHED);
+    maybe_t<tok_t> token{};
+    while ((token = tok.next()) && !finished) {
+        size_t tok_begin = token->offset;
         switch (token->type) {
             case token_type_t::pipe: {
                 if (!process) {
-                break;
-            }
-            }
-                /* FALLTHROUGH */
-            case token_type_t::end:
-            case token_type_t::background:
-            case token_type_t::andand:
-            case token_type_t::oror: {
-                if (tok_begin >= pos) {
-                    finished = 1;
-                    if (b) *b = (wchar_t *)begin + tok_begin;
-                } else {
-                    if (a) *a = (wchar_t *)begin + tok_begin + token->length;
-                }
-                break;
-            }
-            default: {
                     break;
                 }
             }
+            /* FALLTHROUGH */
+            case token_type_t::end:
+            case token_type_t::background:
+            case token_type_t::andand:
+            case token_type_t::oror: {
+                if (tok_begin >= pos) {
+                    finished = 1;
+                    if (b) *b = (wchar_t *)begin + tok_begin;
+                } else {
+                    if (a) *a = (wchar_t *)begin + tok_begin + token->length;
+                }
+                break;
+            }
+            default: {
+                break;
+            }
         }
-    free(buffcpy);
+    }
 }

 void parse_util_process_extent(const wchar_t *buff, size_t pos, const wchar_t **a,
                                const wchar_t **b) {
-    job_or_process_extent(buff, pos, a, b, 1);
+    job_or_process_extent(buff, pos, a, b, true);
 }

 void parse_util_job_extent(const wchar_t *buff, size_t pos, const wchar_t **a, const wchar_t **b) {
-    job_or_process_extent(buff, pos, a, b, 0);
+    job_or_process_extent(buff, pos, a, b, false);
 }

 void parse_util_token_extent(const wchar_t *buff, size_t cursor_pos, const wchar_t **tok_begin,
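
For reference, callers treat the out-parameters of parse_util_process_extent and parse_util_job_extent the same way before and after this change: when set, a and b point into the caller's own buffer and delimit the process or job under the cursor, so the extent converts to a substring or to offsets with plain pointer arithmetic. The caller-side sketch below uses a hypothetical stand-in, fake_process_extent, since the real functions need the rest of fish's parse_util machinery; the stub recognizes only a single " | " separator, purely for illustration.

    #include <cstddef>
    #include <cwchar>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for parse_util_process_extent(): same signature and
    // out-parameter convention, but it only understands one " | " separator.
    static void fake_process_extent(const wchar_t *buff, size_t pos, const wchar_t **a,
                                    const wchar_t **b) {
        *a = buff;
        *b = buff + std::wcslen(buff);
        if (const wchar_t *bar = std::wcsstr(buff, L" | ")) {
            if (static_cast<size_t>(bar - buff) >= pos) {
                *b = bar;      // cursor sits in the first process
            } else {
                *a = bar + 3;  // cursor sits in the second process
            }
        }
    }

    int main() {
        const wchar_t *cmdline = L"grep foo | sort -u";
        const wchar_t *begin = nullptr, *end = nullptr;
        fake_process_extent(cmdline, 14, &begin, &end);  // cursor inside "sort -u"
        std::wcout << L"process under cursor: " << std::wstring(begin, end) << L"\n";
        return 0;
    }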