Merge branch 'master' into pager

commit 4a20febe96
@@ -4298,7 +4298,7 @@ int builtin_parse(parser_t &parser, wchar_t **argv)
     const wcstring src = str2wcstring(&txt.at(0), txt.size());
     parse_node_tree_t parse_tree;
     parse_error_list_t errors;
-    bool success = parse_tree_from_string(src, parse_flag_none, &parse_tree, &errors, true);
+    bool success = parse_tree_from_string(src, parse_flag_none, &parse_tree, &errors);
     if (! success)
     {
         stdout_buffer.append(L"Parsing failed:\n");

@@ -4311,7 +4311,7 @@ int builtin_parse(parser_t &parser, wchar_t **argv)
         stdout_buffer.append(L"(Reparsed with continue after error)\n");
         parse_tree.clear();
         errors.clear();
-        parse_tree_from_string(src, parse_flag_continue_after_error, &parse_tree, &errors, true);
+        parse_tree_from_string(src, parse_flag_continue_after_error, &parse_tree, &errors);
     }
     const wcstring dump = parse_dump_tree(parse_tree, src);
     stdout_buffer.append(dump);

@@ -2417,6 +2417,38 @@ static void test_new_parser_ll2(void)
         if (deco != tests[i].deco)
             err(L"When parsing '%ls', expected decoration %d but got %d on line %ld", tests[i].src.c_str(), (int)tests[i].deco, (int)deco, (long)__LINE__);
     }
+
+    /* Verify that 'function -h' and 'function --help' are plain statements but 'function --foo' is not (#1240) */
+    const struct
+    {
+        wcstring src;
+        parse_token_type_t type;
+    }
+    tests2[] =
+    {
+        {L"function -h", symbol_plain_statement},
+        {L"function --help", symbol_plain_statement},
+        {L"function --foo ; end", symbol_function_header},
+        {L"function foo ; end", symbol_function_header},
+    };
+    for (size_t i=0; i < sizeof tests2 / sizeof *tests2; i++)
+    {
+        parse_node_tree_t tree;
+        if (! parse_tree_from_string(tests2[i].src, parse_flag_none, &tree, NULL))
+        {
+            err(L"Failed to parse '%ls'", tests2[i].src.c_str());
+        }
+
+        const parse_node_tree_t::parse_node_list_t node_list = tree.find_nodes(tree.at(0), tests2[i].type);
+        if (node_list.size() == 0)
+        {
+            err(L"Failed to find node of type '%ls'", token_type_description(tests2[i].type).c_str());
+        }
+        else if (node_list.size() > 1)
+        {
+            err(L"Found too many nodes of type '%ls'", token_type_description(tests2[i].type).c_str());
+        }
+    }
 }

 static void test_new_parser_ad_hoc()

@@ -2452,7 +2484,9 @@ static void test_new_parser_errors(void)
     }
     tests[] =
     {
-        {L"echo (abc", parse_error_tokenizer},
+        {L"echo 'abc", parse_error_tokenizer_unterminated_quote},
+        {L"'", parse_error_tokenizer_unterminated_quote},
+        {L"echo (abc", parse_error_tokenizer_unterminated_subshell},

         {L"end", parse_error_unbalancing_end},
         {L"echo hi ; end", parse_error_unbalancing_end},

@@ -114,7 +114,11 @@ enum parse_error_code_t

     parse_error_generic, // unclassified error types

-    parse_error_tokenizer, //tokenizer error
+    //tokenizer errors
+    parse_error_tokenizer_unterminated_quote,
+    parse_error_tokenizer_unterminated_subshell,
+    parse_error_tokenizer_unterminated_escape,
+    parse_error_tokenizer_other,

     parse_error_unbalancing_end, //end outside of block
     parse_error_unbalancing_else, //else outside of if

@@ -116,12 +116,22 @@ PRODUCTIONS(statement) =
 };
 RESOLVE(statement)
 {
-    // Go to decorated statements if the subsequent token looks like '--'
-    // If we are 'begin', then we expect to be invoked with no arguments. But if we are anything else, we require an argument, so do the same thing if the subsequent token is a line end.
+    /* The only block-like builtin that takes any parameters is 'function'. So go to decorated statements if the subsequent token looks like '--'.
+       The logic here is subtle:
+         If we are 'begin', then we expect to be invoked with no arguments.
+         If we are 'function', then we are a non-block if we are invoked with -h or --help.
+         If we are anything else, we require an argument, so do the same thing if the subsequent token is a statement terminator.
+    */
+
     if (token1.type == parse_token_type_string)
     {
-        // If the next token looks like an option (starts with a dash), then parse it as a decorated statement
-        if (token2.has_dash_prefix)
+        // If we are a function, then look for help arguments
+        // Otherwise, if the next token looks like an option (starts with a dash), then parse it as a decorated statement
+        if (token1.keyword == parse_keyword_function && token2.is_help_argument)
         {
             return 4;
         }
+        else if (token1.keyword != parse_keyword_function && token2.has_dash_prefix)
+        {
+            return 4;
+        }

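The resolution rule described in the comment above can be read on its own. Below is a minimal, self-contained sketch of that rule (not part of this commit; sketch_token and resolves_to_plain_statement are invented stand-ins for parse_token_t and the RESOLVE(statement) branch), covering the same cases the new tests2[] table exercises:

#include <cassert>
#include <string>

// Hypothetical stand-in for the two-token lookahead the resolver sees.
struct sketch_token
{
    std::wstring keyword;    // L"function", L"begin", or empty for ordinary commands
    bool has_dash_prefix;    // source starts with '-'
    bool is_help_argument;   // source is exactly "-h" or "--help"
};

// Returns true when the statement should resolve to a decorated (plain) statement
// rather than a block header, mirroring the comment above.
static bool resolves_to_plain_statement(const sketch_token &tok1, const sketch_token &tok2)
{
    if (tok1.keyword == L"function" && tok2.is_help_argument)
        return true;   // 'function -h' and 'function --help' are plain statements (#1240)
    if (tok1.keyword != L"function" && tok2.has_dash_prefix)
        return true;   // any non-block command followed by an option-looking token
    return false;      // 'function --foo' still resolves to a function header
}

int main()
{
    sketch_token kw_function = {L"function", false, false};
    sketch_token arg_help = {L"", true, true};   // "-h" / "--help"
    sketch_token arg_foo = {L"", true, false};   // "--foo"
    assert(resolves_to_plain_statement(kw_function, arg_help));    // plain statement (#1240)
    assert(! resolves_to_plain_statement(kw_function, arg_foo));   // still a function header
    return 0;
}
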
@@ -583,7 +583,7 @@ class parse_ll_t
     void accept_tokens(parse_token_t token1, parse_token_t token2);

     /* Report tokenizer errors */
-    void report_tokenizer_error(parse_token_t token, const wchar_t *tok_error);
+    void report_tokenizer_error(parse_token_t token, int tok_err, const wchar_t *tok_error);

     /* Indicate if we hit a fatal error */
     bool has_fatal_error(void) const

@@ -786,10 +786,31 @@ void parse_ll_t::parse_error_failed_production(struct parse_stack_element_t &sta
     }
 }

-void parse_ll_t::report_tokenizer_error(parse_token_t token, const wchar_t *tok_error)
+void parse_ll_t::report_tokenizer_error(parse_token_t token, int tok_err_code, const wchar_t *tok_error)
 {
     assert(tok_error != NULL);
-    this->parse_error(token, parse_error_tokenizer, L"%ls", tok_error);
+    parse_error_code_t parse_error_code;
+    switch (tok_err_code)
+    {
+        case TOK_UNTERMINATED_QUOTE:
+            parse_error_code = parse_error_tokenizer_unterminated_quote;
+            break;
+
+        case TOK_UNTERMINATED_SUBSHELL:
+            parse_error_code = parse_error_tokenizer_unterminated_subshell;
+            break;
+
+        case TOK_UNTERMINATED_ESCAPE:
+            parse_error_code = parse_error_tokenizer_unterminated_escape;
+            break;
+
+        case TOK_OTHER:
+        default:
+            parse_error_code = parse_error_tokenizer_other;
+            break;
+
+    }
+    this->parse_error(token, parse_error_code, L"%ls", tok_error);
 }

 void parse_ll_t::parse_error(const wchar_t *expected, parse_token_t token)

@@ -1049,6 +1070,11 @@ static const parse_token_t kInvalidToken = {token_type_invalid, parse_keyword_no
 /* Terminal token */
 static const parse_token_t kTerminalToken = {parse_token_type_terminate, parse_keyword_none, false, -1, -1};

+static inline bool is_help_argument(const wchar_t *txt)
+{
+    return ! wcscmp(txt, L"-h") || ! wcscmp(txt, L"--help");
+}
+
 /* Return a new parse token, advancing the tokenizer */
 static inline parse_token_t next_parse_token(tokenizer_t *tok)
 {

@@ -1069,6 +1095,7 @@ static inline parse_token_t next_parse_token(tokenizer_t *tok)
     result.type = parse_token_type_from_tokenizer_token(tok_type);
     result.keyword = keyword_for_token(tok_type, tok_txt);
     result.has_dash_prefix = (tok_txt[0] == L'-');
+    result.is_help_argument = result.has_dash_prefix && is_help_argument(tok_txt);
     result.source_start = (size_t)tok_start;
     result.source_length = tok_extent;

@@ -1076,7 +1103,7 @@ static inline parse_token_t next_parse_token(tokenizer_t *tok)
     return result;
 }

-bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t parse_flags, parse_node_tree_t *output, parse_error_list_t *errors, bool log_it)
+bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t parse_flags, parse_node_tree_t *output, parse_error_list_t *errors)
 {
     parse_ll_t parser;
     parser.set_should_generate_error_messages(errors != NULL);

@@ -1095,10 +1122,10 @@ bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t parse_flags,
     tokenizer_t tok = tokenizer_t(str.c_str(), tok_options);

     /* We are an LL(2) parser. We pass two tokens at a time. New tokens come in at index 1. Seed our queue with an initial token at index 1. */
-    parse_token_t queue[2] = {kInvalidToken, next_parse_token(&tok)};
-
-    /* Loop until we get a terminal token */
-    do
+    parse_token_t queue[2] = {kInvalidToken, kInvalidToken};
+
+    /* Loop until we have a terminal token. */
+    for (size_t token_count = 0; queue[0].type != parse_token_type_terminate; token_count++)
     {
         /* Push a new token onto the queue */
         queue[0] = queue[1];

@@ -1110,13 +1137,16 @@ bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t parse_flags,
             break;
         }

-        /* Pass these two tokens. We know that queue[0] is valid; queue[1] may be invalid. */
-        parser.accept_tokens(queue[0], queue[1]);
+        /* Pass these two tokens, unless we're still loading the queue. We know that queue[0] is valid; queue[1] may be invalid. */
+        if (token_count > 0)
+        {
+            parser.accept_tokens(queue[0], queue[1]);
+        }

         /* Handle tokenizer errors. This is a hack because really the parser should report this for itself; but it has no way of getting the tokenizer message */
         if (queue[1].type == parse_special_type_tokenizer_error)
         {
-            parser.report_tokenizer_error(queue[1], tok_last(&tok));
+            parser.report_tokenizer_error(queue[1], tok_get_error(&tok), tok_last(&tok));
         }

         /* Handle errors */

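The loop rewrite above is easier to see in isolation. Here is a minimal, self-contained sketch (not fish code; the token stream and all sketch_* names are invented for illustration) of seeding both queue slots with an invalid token and skipping the accept step while the queue is still loading:

#include <cstddef>
#include <cstdio>
#include <vector>

enum sketch_type { sketch_invalid, sketch_string, sketch_terminate };
struct sketch_token { sketch_type type; };

int main()
{
    // Pretend token stream ending in a terminator, standing in for next_parse_token().
    std::vector<sketch_token> stream = {{sketch_string}, {sketch_string}, {sketch_terminate}};
    size_t idx = 0;

    // Tokens enter at index 1; queue[0] becomes valid only after the first iteration.
    sketch_token queue[2] = {{sketch_invalid}, {sketch_invalid}};
    size_t accepted = 0;

    for (size_t token_count = 0; queue[0].type != sketch_terminate; token_count++)
    {
        queue[0] = queue[1];
        queue[1] = (idx < stream.size()) ? stream[idx++] : sketch_token{sketch_terminate};
        if (token_count > 0)
        {
            accepted++;   // parser.accept_tokens(queue[0], queue[1]) would run here
        }
    }
    std::printf("accepted %zu times\n", accepted);   // 3 passes: one per string plus the terminator
    return 0;
}
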
@@ -1138,10 +1168,7 @@ bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t parse_flags,
                 break;
             }
         }
-
-        /* If this was the last token, then stop the loop */
-    } while (queue[0].type != parse_token_type_terminate);
+    }

     // Teach each node where its source range is
     parser.determine_node_ranges();

@@ -47,6 +47,7 @@ struct parse_token_t
     enum parse_token_type_t type; // The type of the token as represented by the parser
     enum parse_keyword_t keyword; // Any keyword represented by this token
     bool has_dash_prefix; // Hackish: whether the source contains a dash prefix
+    bool is_help_argument; // Hackish: whether the source looks like '-h' or '--help'
     size_t source_start;
     size_t source_length;

@@ -197,7 +198,7 @@ public:
 };

 /* The big entry point. Parse a string! */
-bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t flags, parse_node_tree_t *output, parse_error_list_t *errors, bool log_it = false);
+bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t flags, parse_node_tree_t *output, parse_error_list_t *errors);

 /* Fish grammar:

@@ -992,9 +992,27 @@ parser_test_error_bits_t parse_util_detect_errors(const wcstring &buff_src, pars
     // We detect this via an 'end_command' block without source
     bool has_unclosed_block = false;

+    // Whether there's an unclosed quote, and therefore unfinished
+    bool has_unclosed_quote = false;
+
     // Parse the input string into a parse tree
     // Some errors are detected here
     bool parsed = parse_tree_from_string(buff_src, parse_flag_leave_unterminated, &node_tree, &parse_errors);

+    for (size_t i=0; i < parse_errors.size(); i++)
+    {
+        if (parse_errors.at(i).code == parse_error_tokenizer_unterminated_quote)
+        {
+            // Remove this error, since we don't consider it a real error
+            has_unclosed_quote = true;
+            parse_errors.erase(parse_errors.begin() + i);
+            i--;
+        }
+    }
+
+    // #1238: If the only error was unterminated quote, then consider this to have parsed successfully. A better fix would be to have parse_tree_from_string return this information directly (but it would be a shame to munge up its nice bool return).
+    if (parse_errors.empty() && has_unclosed_quote)
+        parsed = true;
+
     if (! parsed)
     {
         errored = true;

@@ -1120,7 +1138,7 @@ parser_test_error_bits_t parse_util_detect_errors(const wcstring &buff_src, pars
     if (errored)
         res |= PARSER_TEST_ERROR;

-    if (has_unclosed_block)
+    if (has_unclosed_block || has_unclosed_quote)
         res |= PARSER_TEST_INCOMPLETE;

     if (out_errors)

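The two hunks above combine into a simple rule: an unterminated quote now reports incomplete input rather than a hard parse error. A minimal, self-contained sketch of that classification (not fish code; classify, sketch_unterminated_quote, SKETCH_ERROR, and SKETCH_INCOMPLETE are invented stand-ins for the real error codes and the PARSER_TEST_* bits):

#include <cstdio>
#include <vector>

enum sketch_error_code { sketch_unterminated_quote, sketch_other_error };

// Bit flags standing in for PARSER_TEST_ERROR / PARSER_TEST_INCOMPLETE.
enum { SKETCH_ERROR = 1, SKETCH_INCOMPLETE = 2 };

static int classify(std::vector<sketch_error_code> errors)
{
    bool has_unclosed_quote = false;

    // Drop unterminated-quote errors, remembering that we saw one.
    for (size_t i = 0; i < errors.size(); i++)
    {
        if (errors.at(i) == sketch_unterminated_quote)
        {
            has_unclosed_quote = true;
            errors.erase(errors.begin() + i);
            i--;
        }
    }

    int res = 0;
    if (! errors.empty())
        res |= SKETCH_ERROR;        // real errors remain
    if (has_unclosed_quote)
        res |= SKETCH_INCOMPLETE;   // unfinished input, e.g. echo 'abc
    return res;
}

int main()
{
    std::printf("%d\n", classify({sketch_unterminated_quote}));                      // 2: incomplete only
    std::printf("%d\n", classify({sketch_unterminated_quote, sketch_other_error}));  // 3: error and incomplete
    return 0;
}
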
@@ -1,5 +1,5 @@
 complete -c lsusb -s v -l verbose --description "Increase verbosity (show descriptors)"
-complete -x -c lsusb -s s -a '(__fish_complete_usb)' --description "Show only devices with specified device and/or bus numbers (in decimal)"
+complete -x -c lsusb -s s -a '(__fish_complete_lsusb)' --description "Show only devices with specified device and/or bus numbers (in decimal)"
 complete -c lsusb -s d --description "Show only devices with the specified vendor and product ID numbers (in hexadecimal)"
 complete -c lsusb -s D -l device --description "Selects which device lsusb will examine"
 complete -c lsusb -s t -l tree --description "Dump the physical USB device hierarchy as a tree"

@@ -1,3 +1,3 @@
-function __fish_complete_usb
+function __fish_complete_lsusb
     lsusb | awk '{print $2 ":" $4}'| cut -c1-7
 end