// Programmatic representation of fish code.
#include "config.h"  // IWYU pragma: keep

#include <assert.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <wchar.h>

#include <algorithm>
#include <cwchar>
#include <string>
#include <vector>

#include "common.h"
#include "fallback.h"
#include "parse_constants.h"
#include "parse_productions.h"
#include "parse_tree.h"
#include "proc.h"
#include "tokenizer.h"
#include "wutil.h"  // IWYU pragma: keep

// This array provides strings for each symbol in enum parse_token_type_t in parse_constants.h.
const wchar_t *const token_type_map[] = {
    L"token_type_invalid",
    L"symbol_job_list",
    L"symbol_job",
    L"symbol_job_continuation",
    L"symbol_statement",
    L"symbol_block_statement",
    L"symbol_block_header",
    L"symbol_for_header",
    L"symbol_while_header",
    L"symbol_begin_header",
    L"symbol_function_header",
    L"symbol_if_statement",
    L"symbol_if_clause",
    L"symbol_else_clause",
    L"symbol_else_continuation",
    L"symbol_switch_statement",
    L"symbol_case_item_list",
    L"symbol_case_item",
    L"symbol_boolean_statement",
    L"symbol_decorated_statement",
    L"symbol_plain_statement",
    L"symbol_arguments_or_redirections_list",
    L"symbol_argument_or_redirection",
    L"symbol_andor_job_list",
    L"symbol_argument_list",
    L"symbol_freestanding_argument_list",
    L"symbol_argument",
    L"symbol_redirection",
    L"symbol_optional_background",
    L"symbol_end_command",
    L"parse_token_type_string",
    L"parse_token_type_pipe",
    L"parse_token_type_redirection",
    L"parse_token_type_background",
    L"parse_token_type_end",
    L"parse_token_type_terminate",
    L"parse_special_type_parse_error",
    L"parse_special_type_tokenizer_error",
    L"parse_special_type_comment",
};

using namespace parse_productions;

static bool production_is_empty(const production_t *production) {
    return (*production)[0] == token_type_invalid;
}

/// Returns a string description of this parse error.
wcstring parse_error_t::describe_with_prefix(const wcstring &src, const wcstring &prefix,
                                             bool is_interactive, bool skip_caret) const {
    wcstring result = text;

    if (skip_caret || source_start >= src.size() || source_start + source_length > src.size()) {
        return result;
    }

    // Locate the beginning of this line of source.
    size_t line_start = 0;

    // Look for a newline prior to source_start. If we don't find one, start at the beginning of
    // the string; otherwise start one past the newline. Note that source_start may itself point
    // at a newline; we want to find the newline before it.
    if (source_start > 0) {
        size_t newline = src.find_last_of(L'\n', source_start - 1);
        if (newline != wcstring::npos) {
            line_start = newline + 1;
        }
    }

    // Look for the newline after the source range. If the source range itself includes a
    // newline, that's the one we want, so start just before the end of the range.
    size_t last_char_in_range =
        (source_length == 0 ? source_start : source_start + source_length - 1);
    size_t line_end = src.find(L'\n', last_char_in_range);
    if (line_end == wcstring::npos) {
        line_end = src.size();
    }

    assert(line_end >= line_start);
    assert(source_start >= line_start);

    // Don't include the caret and line if we're interactive and this is the first line, because
    // then it's obvious.
    bool interactive_skip_caret = is_interactive && source_start == 0;

    if (interactive_skip_caret) {
        return result;
    }

    // Append the line of text.
    if (!result.empty()) {
        result.push_back(L'\n');
    }
    result.append(prefix);
    result.append(src, line_start, line_end - line_start);

    // Append the caret line. The input source may include tabs; for that reason we
    // construct a "caret line" that has tabs in corresponding positions.
    const wcstring line_to_measure = prefix + wcstring(src, line_start, source_start - line_start);
    wcstring caret_space_line;
    caret_space_line.reserve(source_start - line_start);
    for (size_t i = 0; i < line_to_measure.size(); i++) {
        wchar_t wc = line_to_measure.at(i);
        if (wc == L'\t') {
            caret_space_line.push_back(L'\t');
        } else if (wc == L'\n') {
            // It's possible that the source_start points at a newline itself. In that case,
            // pretend it's a space. We only expect this to be at the end of the string.
            caret_space_line.push_back(L' ');
        } else {
            int width = fish_wcwidth(wc);
            if (width > 0) {
                caret_space_line.append(static_cast<size_t>(width), L' ');
            }
        }
    }
    result.push_back(L'\n');
    result.append(caret_space_line);
    result.push_back(L'^');
    return result;
}
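
// Illustrative output (added commentary, not in the original; the message text
// is hypothetical): for an error whose source range covers the stray quote in
// `echo 'hi`, describe_with_prefix() with an empty prefix yields the message,
// the offending source line, and a caret under the start of the range:
//
//   Unexpected end of string, quotes are not balanced
//   echo 'hi
//        ^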

wcstring parse_error_t::describe(const wcstring &src) const {
    return this->describe_with_prefix(src, wcstring(), shell_is_interactive(), false);
}

void parse_error_offset_source_start(parse_error_list_t *errors, size_t amt) {
    assert(errors != NULL);
    if (amt > 0) {
        size_t i, max = errors->size();
        for (i = 0; i < max; i++) {
            parse_error_t *error = &errors->at(i);
            // Preserve the special meaning of -1 as 'unknown'.
            if (error->source_start != SOURCE_LOCATION_UNKNOWN) {
                error->source_start += amt;
            }
        }
    }
}
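
// Usage sketch (added commentary, not in the original; parse_some_substring is
// a hypothetical helper): a caller that parses an embedded substring but
// reports errors against the enclosing source shifts each error afterwards:
//
//   parse_error_list_t errs = parse_some_substring(src, substring_offset);
//   parse_error_offset_source_start(&errs, substring_offset);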

/// Returns a string description for the given token type.
const wchar_t *token_type_description(parse_token_type_t type) {
    if (type >= 0 && type <= LAST_TOKEN_TYPE) return token_type_map[type];

    // This leaks memory but it should never be run unless we have a bug elsewhere in the code.
    const wcstring d = format_string(L"unknown_token_type_%ld", static_cast<long>(type));
    wchar_t *d2 = new wchar_t[d.size() + 1];
    // cppcheck-suppress memleak
    return std::wcscpy(d2, d.c_str());
}

#define LONGIFY(x) L##x
#define KEYWORD_MAP(x) \
    { parse_keyword_##x, LONGIFY(#x) }
static const struct {
    const parse_keyword_t keyword;
    const wchar_t *const name;
} keyword_map[] = {
    // Note that these must be sorted (except for the first), so that we can do binary search.
    KEYWORD_MAP(none),
    KEYWORD_MAP(and),
    KEYWORD_MAP(begin),
    KEYWORD_MAP(builtin),
    KEYWORD_MAP(case),
    KEYWORD_MAP(command),
    KEYWORD_MAP(else),
    KEYWORD_MAP(end),
    KEYWORD_MAP(exec),
    KEYWORD_MAP(for),
    KEYWORD_MAP(function),
    KEYWORD_MAP(if),
    KEYWORD_MAP(in),
    KEYWORD_MAP(not),
    KEYWORD_MAP(or),
    KEYWORD_MAP(switch),
    KEYWORD_MAP(while)
};

const wchar_t *keyword_description(parse_keyword_t type) {
    if (type >= 0 && type <= LAST_KEYWORD) return keyword_map[type].name;

    // This leaks memory but it should never be run unless we have a bug elsewhere in the code.
    const wcstring d = format_string(L"unknown_keyword_%ld", static_cast<long>(type));
    wchar_t *d2 = new wchar_t[d.size() + 1];
    // cppcheck-suppress memleak
    return std::wcscpy(d2, d.c_str());
}
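
// Note (added commentary, not in the original): keyword_description() indexes
// keyword_map directly by enum value, so the table order must mirror
// parse_keyword_t, while keyword_with_name() below binary-searches the same
// table by name, which is why all entries after the first must stay sorted.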

static wcstring token_type_user_presentable_description(
    parse_token_type_t type, parse_keyword_t keyword = parse_keyword_none) {
    if (keyword != parse_keyword_none) {
        return format_string(L"keyword '%ls'", keyword_description(keyword));
    }

    switch (type) {
        // Hackish. We only support the following types.
        case symbol_statement: {
            return L"a command";
        }
        case symbol_argument: {
            return L"an argument";
        }
        case parse_token_type_string: {
            return L"a string";
        }
        case parse_token_type_pipe: {
            return L"a pipe";
        }
        case parse_token_type_redirection: {
            return L"a redirection";
        }
        case parse_token_type_background: {
            return L"a '&'";
        }
        case parse_token_type_end: {
            return L"end of the statement";
        }
        case parse_token_type_terminate: {
            return L"end of the input";
        }
        default: { return format_string(L"a %ls", token_type_description(type)); }
    }
}

static wcstring block_type_user_presentable_description(parse_token_type_t type) {
    switch (type) {
        case symbol_for_header: {
            return L"for loop";
        }
        case symbol_while_header: {
            return L"while loop";
        }
        case symbol_function_header: {
            return L"function definition";
        }
        case symbol_begin_header: {
            return L"begin";
        }
        case symbol_if_statement: {
            return L"if statement";
        }
        case symbol_switch_statement: {
            return L"switch statement";
        }
        default: { return token_type_description(type); }
    }
}

/// Returns a string description of the given parse node.
wcstring parse_node_t::describe() const {
    wcstring result = token_type_description(this->type);
    return result;
}

/// Returns a string description of the given parse token.
wcstring parse_token_t::describe() const {
    wcstring result = token_type_description(type);
    if (keyword != parse_keyword_none) {
        append_format(result, L" <%ls>", keyword_description(keyword));
    }
    return result;
}

/// A string description appropriate for presentation to the user.
wcstring parse_token_t::user_presentable_description() const {
    return token_type_user_presentable_description(type, keyword);
}

/// Convert from tokenizer_t's token type to a parse_token_t type.
static inline parse_token_type_t parse_token_type_from_tokenizer_token(
    enum token_type tokenizer_token_type) {
    parse_token_type_t result = token_type_invalid;
    switch (tokenizer_token_type) {
        case TOK_STRING: {
            result = parse_token_type_string;
            break;
        }
        case TOK_PIPE: {
            result = parse_token_type_pipe;
            break;
        }
        case TOK_END: {
            result = parse_token_type_end;
            break;
        }
        case TOK_BACKGROUND: {
            result = parse_token_type_background;
            break;
        }
        case TOK_REDIRECT_OUT:
        case TOK_REDIRECT_APPEND:
        case TOK_REDIRECT_IN:
        case TOK_REDIRECT_FD:
        case TOK_REDIRECT_NOCLOB: {
            result = parse_token_type_redirection;
            break;
        }
        case TOK_ERROR: {
            result = parse_special_type_tokenizer_error;
            break;
        }
        case TOK_COMMENT: {
            result = parse_special_type_comment;
            break;
        }
        default: {
            fprintf(stderr, "Bad token type %d passed to %s\n", (int)tokenizer_token_type,
                    __FUNCTION__);
            assert(0);
            break;
        }
    }
    return result;
}

#if 0
// Disabled for the 2.2.0 release: https://github.com/fish-shell/fish-shell/issues/1809.

/// Helper function for parse_dump_tree().
static void dump_tree_recursive(const parse_node_tree_t &nodes, const wcstring &src,
                                node_offset_t node_idx, size_t indent, wcstring *result,
                                size_t *line, node_offset_t *inout_first_node_not_dumped) {
    assert(node_idx < nodes.size());

    // Update first_node_not_dumped. This takes a bit of explanation. While it's true that a parse
    // tree may be a "forest", its individual trees are "compact," meaning they are not
    // interleaved. Thus we keep track of the largest node index as we descend a tree. One past the
    // largest is the start of the next tree.
    if (*inout_first_node_not_dumped <= node_idx) {
        *inout_first_node_not_dumped = node_idx + 1;
    }

    const parse_node_t &node = nodes.at(node_idx);

    const size_t spacesPerIndent = 2;

    // Unindent statement lists by 1 to flatten them.
    if (node.type == symbol_job_list || node.type == symbol_arguments_or_redirections_list) {
        if (indent > 0) indent -= 1;
    }

    append_format(*result, L"%2lu - %2lu ", *line, node_idx);
    result->append(indent * spacesPerIndent, L' ');
    result->append(node.describe());
    if (node.child_count > 0) {
        append_format(*result, L" <%lu children>", node.child_count);
    }
    if (node.has_comments()) {
        result->append(L" <has_comments>");
    }

    if (node.has_source() && node.type == parse_token_type_string) {
        result->append(L": \"");
        result->append(src, node.source_start, node.source_length);
        result->append(L"\"");
    }

    if (node.type != parse_token_type_string) {
        if (node.has_source()) {
            append_format(*result, L" [%ld, %ld]", (long)node.source_start,
                          (long)node.source_length);
        } else {
            result->append(L" [no src]");
        }
    }

    result->push_back(L'\n');
    ++*line;
    for (node_offset_t child_idx = node.child_start;
         child_idx < node.child_start + node.child_count; child_idx++) {
        dump_tree_recursive(nodes, src, child_idx, indent + 1, result, line,
                            inout_first_node_not_dumped);
    }
}

/// Gives a debugging textual description of a parse tree. Note that this supports "parse forests"
/// too. That is, our tree may not really be a tree, but instead a collection of trees.
wcstring parse_dump_tree(const parse_node_tree_t &nodes, const wcstring &src) {
    if (nodes.empty()) return L"(empty!)";

    node_offset_t first_node_not_dumped = 0;
    size_t line = 0;
    wcstring result;
    while (first_node_not_dumped < nodes.size()) {
        if (first_node_not_dumped > 0) {
            result.append(L"---New Tree---\n");
        }
        dump_tree_recursive(nodes, src, first_node_not_dumped, 0, &result, &line,
                            &first_node_not_dumped);
    }
    return result;
}
#endif

/// Struct representing elements of the symbol stack, used in the internal state of the LL parser.
struct parse_stack_element_t {
    enum parse_token_type_t type;
    enum parse_keyword_t keyword;
    node_offset_t node_idx;

    explicit parse_stack_element_t(parse_token_type_t t, node_offset_t idx)
        : type(t), keyword(parse_keyword_none), node_idx(idx) {}

    explicit parse_stack_element_t(production_element_t e, node_offset_t idx)
        : type(production_element_type(e)), keyword(production_element_keyword(e)), node_idx(idx) {}

    wcstring describe(void) const {
        wcstring result = token_type_description(type);
        if (keyword != parse_keyword_none) {
            append_format(result, L" <%ls>", keyword_description(keyword));
        }
        return result;
    }

    /// Returns a name that we can show to the user, e.g. "a command".
    wcstring user_presentable_description(void) const {
        return token_type_user_presentable_description(type, keyword);
    }
};

/// The parser itself, private implementation of class parse_t. This is a hand-coded table-driven
/// LL parser. Most hand-coded LL parsers are recursive descent, but recursive descent parsers are
/// difficult to "pause", unlike table-driven parsers.
class parse_ll_t {
    // Traditional symbol stack of the LL parser.
    std::vector<parse_stack_element_t> symbol_stack;
    // Parser output. This is a parse tree, but stored in an array.
    parse_node_tree_t nodes;
    // Whether we ran into a fatal error, including parse errors or tokenizer errors.
    bool fatal_errored;
    // Whether we should collect error messages or not.
    bool should_generate_error_messages;
    // List of errors we have encountered.
    parse_error_list_t errors;
    // The symbol stack can contain terminal types or symbols. Symbols go on to do productions, but
    // terminal types are just matched against input tokens.
    bool top_node_handle_terminal_types(parse_token_t token);

    void parse_error_unexpected_token(const wchar_t *expected, parse_token_t token);
    void parse_error(parse_token_t token, parse_error_code_t code, const wchar_t *format, ...);
    void parse_error_at_location(size_t location, parse_error_code_t code, const wchar_t *format,
                                 ...);
    void parse_error_failed_production(struct parse_stack_element_t &elem, parse_token_t token);
    void parse_error_unbalancing_token(parse_token_t token);

    // Reports an error for an unclosed block, e.g. 'begin;'. Returns true on success, false on
    // failure (e.g. it is not an unclosed block).
    bool report_error_for_unclosed_block();

    // void dump_stack(void) const;

    /// Get the node corresponding to the top element of the stack.
    parse_node_t &node_for_top_symbol() {
        PARSE_ASSERT(!symbol_stack.empty());  //!OCLINT(multiple unary operator)
        const parse_stack_element_t &top_symbol = symbol_stack.back();
        PARSE_ASSERT(top_symbol.node_idx != NODE_OFFSET_INVALID);
        PARSE_ASSERT(top_symbol.node_idx < nodes.size());
        return nodes.at(top_symbol.node_idx);
    }

    /// Pop from the top of the symbol stack, then push the given production, updating node counts.
    /// Note that production_t has type "pointer to array" so some care is required.
    inline void symbol_stack_pop_push_production(const production_t *production) {
        bool logit = false;
        if (logit) {
            size_t count = 0;
            fprintf(stderr, "Applying production:\n");
            for (size_t i = 0; i < MAX_SYMBOLS_PER_PRODUCTION; i++) {
                production_element_t elem = (*production)[i];
                if (production_element_is_valid(elem)) {
                    parse_token_type_t type = production_element_type(elem);
                    parse_keyword_t keyword = production_element_keyword(elem);
                    fprintf(stderr, "\t%ls <%ls>\n", token_type_description(type),
                            keyword_description(keyword));
                    count++;
                }
            }
            if (!count) fprintf(stderr, "\t<empty>\n");
        }

        // Get the parent index. But we can't get the parent parse node yet, since it may be made
        // invalid by adding children.
        const node_offset_t parent_node_idx = symbol_stack.back().node_idx;

        // Add the children. Confusingly, we want our nodes to be in forwards order (last token
        // last, so dumps look nice), but the symbols should be reverse order (last token first, so
        // it's lowest on the stack).
        const size_t child_start_big = nodes.size();
        assert(child_start_big < NODE_OFFSET_INVALID);
        node_offset_t child_start = static_cast<node_offset_t>(child_start_big);

        // To avoid constructing multiple nodes, we make a single one that we modify.
        parse_node_t representative_child(token_type_invalid);
        representative_child.parent = parent_node_idx;

        node_offset_t child_count = 0;
        for (size_t i = 0; i < MAX_SYMBOLS_PER_PRODUCTION; i++) {
            production_element_t elem = (*production)[i];
            if (!production_element_is_valid(elem)) {
                break;  // all done, bail out
            }

            // Append the parse node.
            representative_child.type = production_element_type(elem);
            nodes.push_back(representative_child);
            child_count++;
        }

        // Update the parent.
        parse_node_t &parent_node = nodes.at(parent_node_idx);

        // Should have no children yet.
        PARSE_ASSERT(parent_node.child_count == 0);

        // Tell the node about its children.
        parent_node.child_start = child_start;
        parent_node.child_count = child_count;

        // Replace the top of the stack with new stack elements corresponding to our new nodes.
        // Note that these go in reverse order.
        symbol_stack.pop_back();
        symbol_stack.reserve(symbol_stack.size() + child_count);
        node_offset_t idx = child_count;
        while (idx--) {
            production_element_t elem = (*production)[idx];
            PARSE_ASSERT(production_element_is_valid(elem));
            symbol_stack.push_back(parse_stack_element_t(elem, child_start + idx));
        }
    }
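
    // Worked example (added commentary, not in the original): if the grammar
    // expands symbol_job via the production <statement> <job_continuation>,
    // the loop above appends two child nodes, pops symbol_job, and pushes
    // job_continuation first and statement last, so statement, the leftmost
    // symbol, sits on top of the stack and is matched against input first.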

   public:
    // Constructor
    explicit parse_ll_t(enum parse_token_type_t goal)
        : fatal_errored(false), should_generate_error_messages(true) {
        this->symbol_stack.reserve(16);
        this->nodes.reserve(64);
        this->reset_symbols_and_nodes(goal);
    }

    // Input
    void accept_tokens(parse_token_t token1, parse_token_t token2);

    /// Report tokenizer errors.
    void report_tokenizer_error(const tok_t &tok);

    /// Indicate if we hit a fatal error.
    bool has_fatal_error(void) const { return this->fatal_errored; }

    /// Indicate whether we want to generate error messages.
    void set_should_generate_error_messages(bool flag) {
        this->should_generate_error_messages = flag;
    }

    /// Clear the parse symbol stack (but not the node tree). Add a node of the given type as the
    /// goal node. This is called from the constructor.
    void reset_symbols(enum parse_token_type_t goal);

    /// Clear the parse symbol stack and the node tree. Add a node of the given type as the goal
    /// node. This is called from the constructor.
    void reset_symbols_and_nodes(enum parse_token_type_t goal);

    /// Once parsing is complete, determine the ranges of intermediate nodes.
    void determine_node_ranges();

    /// Acquire output after parsing. This transfers directly from within self.
    void acquire_output(parse_node_tree_t *output, parse_error_list_t *errors);
};
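
// A minimal driving sketch (added commentary, not in the original; `tok1` and
// `tok2` are hypothetical tokens from a tokenizer loop). The parser consumes
// one token at a time with a second token of lookahead, and the tree and error
// list are extracted once the terminate token has been accepted:
//
//   parse_ll_t parser(symbol_job_list);
//   parser.accept_tokens(tok1, tok2);  // repeat over the token stream
//   parser.determine_node_ranges();
//   parse_node_tree_t tree;
//   parse_error_list_t errors;
//   parser.acquire_output(&tree, &errors);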

#if 0
void parse_ll_t::dump_stack(void) const {
    // Walk backwards from the top, looking for parents.
    wcstring_list_t stack_lines;
    if (symbol_stack.empty()) {
        stack_lines.push_back(L"(empty)");
    } else {
        node_offset_t child = symbol_stack.back().node_idx;
        node_offset_t cursor = child;
        stack_lines.push_back(nodes.at(cursor).describe());
        while (cursor--) {
            const parse_node_t &node = nodes.at(cursor);
            if (node.child_start <= child && node.child_start + node.child_count > child) {
                stack_lines.push_back(node.describe());
                child = cursor;
            }
        }
    }

    fprintf(stderr, "Stack dump (%zu elements):\n", symbol_stack.size());
    for (size_t idx = 0; idx < stack_lines.size(); idx++) {
        fprintf(stderr, " %ls\n", stack_lines.at(idx).c_str());
    }
}
#endif

// Give each node a source range equal to the union of the ranges of its children. Terminal nodes
// already have source ranges (and no children). Since children always appear after their parents,
// we can implement this very simply by walking backwards. We then do a second pass to give empty
// nodes an empty source range (but with a valid offset). We do this by walking forward. If a child
// of a node has an invalid source range, we set it equal to the end of the source range of its
// previous child.
void parse_ll_t::determine_node_ranges(void) {
    size_t idx = nodes.size();
    while (idx--) {
        parse_node_t *parent = &nodes[idx];

        // Skip nodes that already have a source range. These are terminal nodes.
        if (parent->source_start != SOURCE_OFFSET_INVALID) continue;

        // Ok, this node needs a source range. Get all of its children, and then set its range.
        source_offset_t min_start = SOURCE_OFFSET_INVALID,
                        max_end = 0;  // note SOURCE_OFFSET_INVALID is huge
        for (node_offset_t i = 0; i < parent->child_count; i++) {
            const parse_node_t &child = nodes.at(parent->child_offset(i));
            if (child.has_source()) {
                min_start = std::min(min_start, child.source_start);
                max_end = std::max(max_end, child.source_start + child.source_length);
            }
        }

        if (min_start != SOURCE_OFFSET_INVALID) {
            assert(max_end >= min_start);
            parent->source_start = min_start;
            parent->source_length = max_end - min_start;
        }
    }

    // Forward pass.
    size_t size = nodes.size();
    for (idx = 0; idx < size; idx++) {
        // Since we populate the source range based on the sibling node, it's simpler to walk over
        // the children of each node. We keep a running "child_source_cursor" which is meant to be
        // the end of the child's source range. It's initially set to the beginning of the parent's
        // source range.
        parse_node_t *parent = &nodes[idx];
        // If the parent doesn't have a valid source range, then none of its children will either;
        // skip it entirely.
        if (parent->source_start == SOURCE_OFFSET_INVALID) {
            continue;
        }
        source_offset_t child_source_cursor = parent->source_start;
        for (size_t child_idx = 0; child_idx < parent->child_count; child_idx++) {
            parse_node_t *child = &nodes[parent->child_start + child_idx];
            if (child->source_start == SOURCE_OFFSET_INVALID) {
                child->source_start = child_source_cursor;
            }
            child_source_cursor = child->source_start + child->source_length;
        }
    }
}
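
// Worked example (added commentary, not in the original): for the source
// "a b", terminal nodes covering "a" (offset 0, length 1) and "b" (offset 2,
// length 1) give their parent the union range of source_start 0 and
// source_length 3 in the backward pass; an empty sibling such as a childless
// list node then receives a zero-length range anchored at the end of the
// previous child in the forward pass.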

void parse_ll_t::acquire_output(parse_node_tree_t *output, parse_error_list_t *errors) {
    if (output != NULL) {
        output->swap(this->nodes);
    }
    this->nodes.clear();

    if (errors != NULL) {
        errors->swap(this->errors);
    }
    this->errors.clear();
    this->symbol_stack.clear();
}

void parse_ll_t::parse_error(parse_token_t token, parse_error_code_t code, const wchar_t *fmt,
                             ...) {
    this->fatal_errored = true;
    if (this->should_generate_error_messages) {
        // this->dump_stack();
        parse_error_t err;

        va_list va;
        va_start(va, fmt);
        err.text = vformat_string(fmt, va);
        err.code = code;
        va_end(va);

        err.source_start = token.source_start;
        err.source_length = token.source_length;
        this->errors.push_back(err);
    }
}

void parse_ll_t::parse_error_at_location(size_t source_location, parse_error_code_t code,
                                         const wchar_t *fmt, ...) {
    this->fatal_errored = true;
    if (this->should_generate_error_messages) {
        // this->dump_stack();
        parse_error_t err;

        va_list va;
        va_start(va, fmt);
        err.text = vformat_string(fmt, va);
        err.code = code;
        va_end(va);

        err.source_start = source_location;
        err.source_length = 0;
        this->errors.push_back(err);
    }
}

// Unbalancing token. This includes 'else' or 'case' or 'end' outside of the appropriate block.
// This essentially duplicates some logic from resolving the production for symbol_statement_list -
// yuck.
void parse_ll_t::parse_error_unbalancing_token(parse_token_t token) {
    this->fatal_errored = true;
    if (this->should_generate_error_messages) {
        switch (token.keyword) {
            case parse_keyword_end: {
                this->parse_error(token, parse_error_unbalancing_end, L"'end' outside of a block");
                break;
            }
            case parse_keyword_else: {
                this->parse_error(token, parse_error_unbalancing_else,
                                  L"'else' builtin not inside of if block");
                break;
            }
            case parse_keyword_case: {
                this->parse_error(token, parse_error_unbalancing_case,
                                  L"'case' builtin not inside of switch block");
                break;
            }
            default: {
                // At the moment, this case should only be hit if you parse a
                // freestanding_argument_list. For example, 'complete -c foo -a 'one & three''.
                // Hackish error message for that case.
                if (!symbol_stack.empty() &&
                    symbol_stack.back().type == symbol_freestanding_argument_list) {
                    this->parse_error(
                        token, parse_error_generic, L"Expected %ls, but found %ls",
                        token_type_user_presentable_description(symbol_argument).c_str(),
                        token.user_presentable_description().c_str());
                } else {
                    this->parse_error(token, parse_error_generic, L"Did not expect %ls",
                                      token.user_presentable_description().c_str());
                }
                break;
            }
        }
    }
}

/// This is a 'generic' parse error when we can't match the top of the stack element.
void parse_ll_t::parse_error_failed_production(struct parse_stack_element_t &stack_elem,
                                               parse_token_t token) {
    fatal_errored = true;
    if (this->should_generate_error_messages) {
        bool done = false;

        // Check for ||.
        if (token.type == parse_token_type_pipe && token.source_start > 0) {
            // Here we wanted a statement and instead got a pipe. See if this is a double pipe: foo
            // || bar. If so, we have a special error message.
            const parse_node_t *prev_pipe = nodes.find_node_matching_source_location(
                parse_token_type_pipe, token.source_start - 1, NULL);
            if (prev_pipe != NULL) {
                // The pipe of the previous job abuts our current token. So we have ||.
                this->parse_error(token, parse_error_double_pipe, ERROR_BAD_OR);
                done = true;
            }
        }

        // Check for &&.
        if (!done && token.type == parse_token_type_background && token.source_start > 0) {
            // Check to see if there was a previous token_background.
            const parse_node_t *prev_background = nodes.find_node_matching_source_location(
                parse_token_type_background, token.source_start - 1, NULL);
            if (prev_background != NULL) {
                // We have &&.
                this->parse_error(token, parse_error_double_background, ERROR_BAD_AND);
                done = true;
            }
        }

        if (!done) {
            const wcstring expected = stack_elem.user_presentable_description();
            this->parse_error_unexpected_token(expected.c_str(), token);
        }
    }
}

void parse_ll_t::report_tokenizer_error(const tok_t &tok) {
    parse_error_code_t parse_error_code;
    switch (tok.error) {
        case TOK_UNTERMINATED_QUOTE: {
            parse_error_code = parse_error_tokenizer_unterminated_quote;
            break;
        }
        case TOK_UNTERMINATED_SUBSHELL: {
            parse_error_code = parse_error_tokenizer_unterminated_subshell;
            break;
        }
        case TOK_UNTERMINATED_SLICE: {
            parse_error_code = parse_error_tokenizer_unterminated_slice;
            break;
        }
        case TOK_UNTERMINATED_ESCAPE: {
            parse_error_code = parse_error_tokenizer_unterminated_escape;
            break;
        }
        case TOK_OTHER:
        default: {
            parse_error_code = parse_error_tokenizer_other;
            break;
        }
    }
    this->parse_error_at_location(tok.offset + tok.error_offset, parse_error_code, L"%ls",
                                  tok.text.c_str());
}

void parse_ll_t::parse_error_unexpected_token(const wchar_t *expected, parse_token_t token) {
    fatal_errored = true;
    if (this->should_generate_error_messages) {
        this->parse_error(token, parse_error_generic, L"Expected %ls, but instead found %ls",
                          expected, token.user_presentable_description().c_str());
    }
}

void parse_ll_t::reset_symbols(enum parse_token_type_t goal) {
    // Add a new goal node, and then reset our symbol list to point at it.
    node_offset_t where = static_cast<node_offset_t>(nodes.size());
    nodes.push_back(parse_node_t(goal));

    symbol_stack.clear();
    symbol_stack.push_back(parse_stack_element_t(goal, where));  // goal token
    this->fatal_errored = false;
}

/// Reset both symbols and nodes.
void parse_ll_t::reset_symbols_and_nodes(enum parse_token_type_t goal) {
    nodes.clear();
    this->reset_symbols(goal);
}

static bool type_is_terminal_type(parse_token_type_t type) {
    switch (type) {
        case parse_token_type_string:
        case parse_token_type_pipe:
        case parse_token_type_redirection:
        case parse_token_type_background:
        case parse_token_type_end:
        case parse_token_type_terminate: {
            return true;
        }
        default: { return false; }
    }
}

bool parse_ll_t::report_error_for_unclosed_block() {
    bool reported_error = false;
    // Unclosed block, for example, 'while true ; '. We want to show the block node that opened it.
    const parse_node_t &top_node = this->node_for_top_symbol();

    // Hacktastic. We want to point at the source location of the block, but our block doesn't have
    // a source range yet - only the terminal tokens do. So get the block statement corresponding to
    // this end command. In general this block may be of a variety of types: if_statement,
    // switch_statement, etc., each with different node structures. But keep descending the first
    // child and eventually you hit a keyword: begin, if, etc. That's the keyword we care about.
    const parse_node_t *end_command = this->nodes.get_parent(top_node, symbol_end_command);
    const parse_node_t *block_node = end_command ? this->nodes.get_parent(*end_command) : NULL;

    if (block_node && block_node->type == symbol_block_statement) {
        // Get the header.
        block_node = this->nodes.get_child(*block_node, 0, symbol_block_header);
        block_node = this->nodes.get_child(*block_node, 0);  // specific statement
    }
    if (block_node == NULL) {
        return reported_error;
    }

    // block_node is now an if_statement, switch_statement, for_header, while_header,
    // function_header, or begin_header.
    //
    // Hackish: descend down the first node until we reach the bottom. This will be a keyword
    // node like SWITCH, which will have the source range. Ordinarily the source range would be
    // known by the parent node too, but we haven't completed parsing yet, so we haven't yet
    // propagated source ranges.
    const parse_node_t *cursor = block_node;
    while (cursor->child_count > 0) {
        cursor = this->nodes.get_child(*cursor, 0);
        assert(cursor != NULL);
    }
    if (cursor->source_start != SOURCE_OFFSET_INVALID) {
        const wcstring node_desc = block_type_user_presentable_description(block_node->type);
        this->parse_error_at_location(cursor->source_start, parse_error_generic,
                                      L"Missing end to balance this %ls", node_desc.c_str());
        reported_error = true;
    }
    return reported_error;
}
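
// Illustrative behavior (added commentary, not in the original): for input
// such as "while true ;" followed by end of input, the terminate token cannot
// match the expected 'end' keyword; this function then points the error at the
// opening keyword, producing roughly "Missing end to balance this while loop".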

bool parse_ll_t::top_node_handle_terminal_types(parse_token_t token) {
    PARSE_ASSERT(!symbol_stack.empty());  //!OCLINT(multiple unary operator)
    PARSE_ASSERT(token.type >= FIRST_PARSE_TOKEN_TYPE);
    parse_stack_element_t &stack_top = symbol_stack.back();

    if (!type_is_terminal_type(stack_top.type)) {
        return false;  // was not handled
    }

    // The top of the stack is terminal. We are going to handle this (because we can't produce
    // from a terminal type).

    // Now see if we actually matched.
    bool matched = false;
    if (stack_top.type == token.type) {
        if (stack_top.type == parse_token_type_string) {
            // We matched if the keywords match, or no keyword was required.
            matched =
                (stack_top.keyword == parse_keyword_none || stack_top.keyword == token.keyword);
        } else {
            // For other types, we only require that the types match.
            matched = true;
        }
    }

    if (matched) {
        // Success. Tell the node that it matched this token, and what its source range is. In the
        // parse phase, we only set source ranges for terminal types. We propagate ranges to
        // parent nodes afterwards.
        parse_node_t &node = node_for_top_symbol();
        node.keyword = token.keyword;
        node.source_start = token.source_start;
        node.source_length = token.source_length;
    } else {
        // Failure.
        if (stack_top.type == parse_token_type_string && token.type == parse_token_type_string) {
            // Keyword failure. We should unify this with the 'matched' computation above.
            assert(stack_top.keyword != parse_keyword_none && stack_top.keyword != token.keyword);

            // Check to see which keyword we got which was considered wrong.
            switch (token.keyword) {
                // Some keywords are only valid in certain contexts. If this cascaded all the
                // way down through the outermost job_list, it was not in a valid context.
                case parse_keyword_case:
                case parse_keyword_end:
                case parse_keyword_else: {
                    this->parse_error_unbalancing_token(token);
                    break;
                }
                case parse_keyword_none: {
                    // This is a random other string (not a keyword).
                    const wcstring expected = keyword_description(stack_top.keyword);
                    this->parse_error(token, parse_error_generic, L"Expected keyword '%ls'",
                                      expected.c_str());
                    break;
                }
                default: {
                    // Got a real keyword we can report.
                    const wcstring actual =
                        (token.keyword == parse_keyword_none ? token.describe()
                                                             : keyword_description(token.keyword));
                    const wcstring expected = keyword_description(stack_top.keyword);
                    this->parse_error(token, parse_error_generic,
                                      L"Expected keyword '%ls', instead got keyword '%ls'",
                                      expected.c_str(), actual.c_str());
                    break;
                }
            }
        } else if (stack_top.keyword == parse_keyword_end &&
                   token.type == parse_token_type_terminate &&
                   this->report_error_for_unclosed_block()) {
            ;  // handled by report_error_for_unclosed_block
        } else {
            const wcstring expected = stack_top.user_presentable_description();
            this->parse_error_unexpected_token(expected.c_str(), token);
        }
    }

    // We handled the token, so pop the symbol stack.
    symbol_stack.pop_back();
    return true;
}
|
|
|
|
|
2016-05-03 07:09:46 +08:00
|
|
|
void parse_ll_t::accept_tokens(parse_token_t token1, parse_token_t token2) {
|
2013-07-23 09:26:15 +08:00
|
|
|
bool logit = false;
|
2016-05-03 07:09:46 +08:00
|
|
|
if (logit) {
|
2013-10-10 06:57:10 +08:00
|
|
|
fprintf(stderr, "Accept token %ls\n", token1.describe().c_str());
|
2013-06-19 14:35:04 +08:00
|
|
|
}
|
2013-10-10 06:57:10 +08:00
|
|
|
PARSE_ASSERT(token1.type >= FIRST_PARSE_TOKEN_TYPE);
|
2013-08-11 15:35:00 +08:00
|
|
|
|
2013-06-09 10:20:26 +08:00
|
|
|
bool consumed = false;
|
2013-08-11 15:35:00 +08:00
|
|
|
|
2016-05-03 07:09:46 +08:00
|
|
|
// Handle special types specially. Note that these are the only types that can be pushed if the
|
|
|
|
// symbol stack is empty.
|
|
|
|
if (token1.type == parse_special_type_parse_error ||
|
|
|
|
token1.type == parse_special_type_tokenizer_error ||
|
|
|
|
token1.type == parse_special_type_comment) {
|
|
|
|
// We set the special node's parent to the top of the stack. This means that we have an
|
|
|
|
// asymmetric relationship: the special node has a parent (which is the node we were trying
|
|
|
|
// to generate when we encountered the special node), but the parent node does not have the
|
|
|
|
// special node as a child. This means for example that parents don't have to worry about
|
|
|
|
// tracking any comment nodes, but we can still recover the parent from the comment.
|
2014-09-30 02:29:50 +08:00
|
|
|
parse_node_t special_node(token1.type);
|
|
|
|
special_node.parent = symbol_stack.back().node_idx;
|
|
|
|
special_node.source_start = token1.source_start;
|
|
|
|
special_node.source_length = token1.source_length;
|
|
|
|
nodes.push_back(special_node);
|
2013-08-09 06:06:46 +08:00
|
|
|
consumed = true;
|
2014-01-15 17:40:40 +08:00
|
|
|
|
2016-05-03 07:09:46 +08:00
|
|
|
// Mark special flags.
|
|
|
|
if (token1.type == parse_special_type_comment) {
|
2014-12-24 02:58:45 +08:00
|
|
|
this->node_for_top_symbol().flags |= parse_node_flag_has_comments;
|
|
|
|
}
|
|
|
|
|
2016-05-03 07:09:46 +08:00
|
|
|
// Tokenizer errors are fatal.
|
|
|
|
if (token1.type == parse_special_type_tokenizer_error) this->fatal_errored = true;
|
2013-08-09 06:06:46 +08:00
|
|
|
}

    while (!consumed && !this->fatal_errored) {
        PARSE_ASSERT(!symbol_stack.empty());  //!OCLINT(multiple unary operator)

        if (top_node_handle_terminal_types(token1)) {
            if (logit) {
                fprintf(stderr, "Consumed token %ls\n", token1.describe().c_str());
            }
            break;
        }

        // top_node_handle_terminal_types may indicate an error if our stack is empty.
        if (this->fatal_errored) break;

        // Get the production for the top of the stack.
        parse_stack_element_t &stack_elem = symbol_stack.back();
        parse_node_t &node = nodes.at(stack_elem.node_idx);
        parse_node_tag_t tag = 0;
        const production_t *production =
            production_for_token(stack_elem.type, token1, token2, &tag);
        node.tag = tag;
        if (production == NULL) {
            parse_error_failed_production(stack_elem, token1);
            // The above sets fatal_errored, which ends the loop.
        } else {
            bool is_terminate = (token1.type == parse_token_type_terminate);

            // When a job_list encounters something like 'else', it returns an empty production
            // to return control to the outer block. But if it's unbalanced, then we'll end up
            // with an empty stack! So make sure that doesn't happen. This is the primary
            // mechanism by which we detect e.g. unbalanced end. However, if we get a true
            // terminate token, then we allow (expect) this to empty the stack.
            if (symbol_stack.size() == 1 && production_is_empty(production) && !is_terminate) {
                this->parse_error_unbalancing_token(token1);
                break;
            }
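
            // For example, a stray "end" at the top level: job_list yields an empty
            // production to hand control back, but the stack is already down to the
            // single goal symbol, so we report the token as unbalanced instead.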

            // Manipulate the symbol stack. Note that stack_elem is invalidated by popping the
            // stack.
            symbol_stack_pop_push_production(production);

            // Expect to not have an empty stack, unless this was the terminate type. Note we
            // may not have an empty stack with the terminate type (i.e. incomplete input).
            assert(is_terminate || !symbol_stack.empty());

            if (symbol_stack.empty()) {
                break;
            }
        }
    }
}

// Given an expanded string, returns any keyword it matches.
static parse_keyword_t keyword_with_name(const wchar_t *name) {
    // Binary search on keyword_map. Start at 1 since 0 is keyword_none.
    parse_keyword_t result = parse_keyword_none;
    size_t left = 1, right = sizeof keyword_map / sizeof *keyword_map;
    while (left < right) {
        size_t mid = left + (right - left) / 2;
        int cmp = wcscmp(name, keyword_map[mid].name);
        if (cmp < 0) {
            right = mid;  // name was smaller than mid
        } else if (cmp > 0) {
            left = mid + 1;  // name was larger than mid
        } else {
            result = keyword_map[mid].keyword;  // found it
            break;
        }
    }
    return result;
}
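
// A minimal sanity sketch of the binary search above, kept disabled like the other
// dead code in this file. It assumes keyword_map holds the usual fish keywords in
// sorted order (hypothetical check, not part of the parser).
#if 0
static void check_keyword_with_name() {
    assert(keyword_with_name(L"while") == parse_keyword_while);
    assert(keyword_with_name(L"xyzzy") == parse_keyword_none);
}
#endif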

static bool is_keyword_char(wchar_t c) {
    return (c >= L'a' && c <= L'z') || (c >= L'A' && c <= L'Z') || (c >= L'0' && c <= L'9') ||
           c == L'\'' || c == L'"' || c == L'\\' || c == L'\n';
}

/// Given a token, returns the keyword it matches, or parse_keyword_none.
static parse_keyword_t keyword_for_token(token_type tok, const wcstring &token) {
    // Only strings can be keywords.
    if (tok != TOK_STRING) {
        return parse_keyword_none;
    }

    // If tok_txt is clean (which most are), we can compare it directly. Otherwise we have to
    // expand it. We only expand quotes, and we don't want to do expensive expansions like tilde
    // expansions. So we do our own "cleanliness" check; if we find a character not in our allowed
    // set we know it's not a keyword, and if we never find a quote we don't have to expand! Note
    // that this lowercase set could be shrunk to be just the characters that are in keywords.
    parse_keyword_t result = parse_keyword_none;
    bool needs_expand = false, all_chars_valid = true;
    const wchar_t *tok_txt = token.c_str();
    for (size_t i = 0; tok_txt[i] != L'\0'; i++) {
        wchar_t c = tok_txt[i];
        if (!is_keyword_char(c)) {
            all_chars_valid = false;
            break;
        }
        // If we encounter a quote, we need expansion.
        needs_expand = needs_expand || c == L'"' || c == L'\'' || c == L'\\';
    }

    if (all_chars_valid) {
        // Expand if necessary.
        if (!needs_expand) {
            result = keyword_with_name(tok_txt);
        } else {
            wcstring storage;
            if (unescape_string(tok_txt, &storage, 0)) {
                result = keyword_with_name(storage.c_str());
            }
        }
    }
    return result;
}
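
// Disabled illustration of how quoting interacts with keyword detection: a clean
// token is compared directly, a quoted one is unescaped first, and any character
// outside is_keyword_char disqualifies the token entirely (hypothetical checks).
#if 0
static void check_keyword_for_token() {
    assert(keyword_for_token(TOK_STRING, L"if") == parse_keyword_if);      // clean
    assert(keyword_for_token(TOK_STRING, L"\"if\"") == parse_keyword_if);  // needs expand
    assert(keyword_for_token(TOK_STRING, L"if$x") == parse_keyword_none);  // '$' rejected
}
#endif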

/// Placeholder invalid token.
static const parse_token_t kInvalidToken = {
    token_type_invalid, parse_keyword_none, false, false, SOURCE_OFFSET_INVALID, 0};

/// Terminal token.
static const parse_token_t kTerminalToken = {
    parse_token_type_terminate, parse_keyword_none, false, false, SOURCE_OFFSET_INVALID, 0};

static inline bool is_help_argument(const wcstring &txt) { return contains(txt, L"-h", L"--help"); }

/// Return a new parse token, advancing the tokenizer.
static inline parse_token_t next_parse_token(tokenizer_t *tok, tok_t *token) {
    if (!tok->next(token)) {
        return kTerminalToken;
    }

    parse_token_t result;

    // Set the type, keyword, and whether there's a dash prefix. Note that this is quite sketchy,
    // because it ignores quotes. This is the historical behavior. For example, `builtin --names`
    // lists builtins, but `builtin "--names"` attempts to run --names as a command. Amazingly as
    // of this writing (10/12/13) nobody seems to have noticed this. Squint at it really hard and
    // it even starts to look like a feature.
    result.type = parse_token_type_from_tokenizer_token(token->type);
    result.keyword = keyword_for_token(token->type, token->text);
    result.has_dash_prefix = !token->text.empty() && token->text.at(0) == L'-';
    result.is_help_argument = result.has_dash_prefix && is_help_argument(token->text);

    // These assertions are totally bogus. Basically our tokenizer works in size_t but we work in
    // uint32_t to save some space. If we have a source file larger than 4 GB, we'll probably just
    // crash.
    assert(token->offset < SOURCE_OFFSET_INVALID);
    result.source_start = (source_offset_t)token->offset;

    assert(token->length <= SOURCE_OFFSET_INVALID);
    result.source_length = (source_offset_t)token->length;

    return result;
}

bool parse_tree_from_string(const wcstring &str, parse_tree_flags_t parse_flags,
                            parse_node_tree_t *output, parse_error_list_t *errors,
                            parse_token_type_t goal) {
    parse_ll_t parser(goal);
    parser.set_should_generate_error_messages(errors != NULL);

    // Construct the tokenizer.
    tok_flags_t tok_options = 0;
    if (parse_flags & parse_flag_include_comments) tok_options |= TOK_SHOW_COMMENTS;

    if (parse_flags & parse_flag_accept_incomplete_tokens) tok_options |= TOK_ACCEPT_UNFINISHED;

    if (parse_flags & parse_flag_show_blank_lines) tok_options |= TOK_SHOW_BLANK_LINES;

    if (errors == NULL) tok_options |= TOK_SQUASH_ERRORS;

    tokenizer_t tok(str.c_str(), tok_options);

    // We are an LL(2) parser. We pass two tokens at a time. New tokens come in at index 1. Seed
    // our queue with an initial token at index 1.
    parse_token_t queue[2] = {kInvalidToken, kInvalidToken};
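
    // For instance (roughly), parsing "echo hi" drives accept_tokens with the pairs
    // (echo, hi), then (hi, <terminate>), then (<terminate>, <terminate>); the very
    // first iteration only loads the queue and passes nothing.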

    // Loop until we have a terminal token.
    tok_t tokenizer_token;
    for (size_t token_count = 0; queue[0].type != parse_token_type_terminate; token_count++) {
        // Push a new token onto the queue.
        queue[0] = queue[1];
        queue[1] = next_parse_token(&tok, &tokenizer_token);

        // If we are leaving things unterminated, then don't pass parse_token_type_terminate.
        if (queue[0].type == parse_token_type_terminate &&
            (parse_flags & parse_flag_leave_unterminated)) {
            break;
        }

        // Pass these two tokens, unless we're still loading the queue. We know that queue[0] is
        // valid; queue[1] may be invalid.
        if (token_count > 0) {
            parser.accept_tokens(queue[0], queue[1]);
        }

        // Handle tokenizer errors. This is a hack because really the parser should report this
        // for itself; but it has no way of getting the tokenizer message.
        if (queue[1].type == parse_special_type_tokenizer_error) {
            parser.report_tokenizer_error(tokenizer_token);
        }

        if (!parser.has_fatal_error()) {
            continue;
        }

        // Handle errors.
        if (!(parse_flags & parse_flag_continue_after_error)) {
            break;  // bail out
        }
        // Hack. Typically the parse error is due to the first token. However, if it's a
        // tokenizer error, then has_fatal_error was set due to the check above; in that
        // case the second token is what matters.
        size_t error_token_idx = 0;
        if (queue[1].type == parse_special_type_tokenizer_error) {
            error_token_idx = 1;
            token_count = -1;  // so that it will be 0 after incrementing, and our tokenizer
                               // error will be ignored
        }

        // Mark a special error token, and then keep going.
        const parse_token_t token = {parse_special_type_parse_error,
                                     parse_keyword_none,
                                     false,
                                     false,
                                     queue[error_token_idx].source_start,
                                     queue[error_token_idx].source_length};
        parser.accept_tokens(token, kInvalidToken);
        parser.reset_symbols(goal);
    }

    // Teach each node where its source range is.
    parser.determine_node_ranges();

    // Acquire the output from the parser.
    parser.acquire_output(output, errors);

#if 0
    //wcstring result = dump_tree(this->parser->nodes, str);
    //fprintf(stderr, "Tree (%ld nodes):\n%ls", this->parser->nodes.size(), result.c_str());
    fprintf(stderr, "%lu nodes, node size %lu, %lu bytes\n", output->size(), sizeof(parse_node_t), output->size() * sizeof(parse_node_t));
#endif

    // Indicate if we had a fatal error.
    return !parser.has_fatal_error();
}
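
// Sketch of a typical call (disabled; flag and goal values follow parse_constants.h,
// and the input string is purely illustrative):
#if 0
static void example_parse_tree_from_string() {
    parse_node_tree_t tree;
    parse_error_list_t errors;
    bool success = parse_tree_from_string(L"echo hello | cat", parse_flag_none, &tree, &errors,
                                          symbol_job_list);
    if (!success) {
        // errors now holds one or more parse_error_t describing what went wrong.
    }
}
#endif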

const parse_node_t *parse_node_tree_t::get_child(const parse_node_t &parent, node_offset_t which,
                                                 parse_token_type_t expected_type) const {
    const parse_node_t *result = NULL;

    // We may get nodes with no children if we had an incomplete parse. Don't consider that an
    // error.
    if (parent.child_count > 0) {
        PARSE_ASSERT(which < parent.child_count);
        node_offset_t child_offset = parent.child_offset(which);
        if (child_offset < this->size()) {
            result = &this->at(child_offset);

            // If we are given an expected type, then the node must be of that type.
            assert(expected_type == token_type_invalid || expected_type == result->type);
        }
    }

    return result;
}

const parse_node_t &parse_node_tree_t::find_child(const parse_node_t &parent,
                                                  parse_token_type_t type) const {
    for (node_offset_t i = 0; i < parent.child_count; i++) {
        const parse_node_t *child = this->get_child(parent, i);
        if (child->type == type) {
            return *child;
        }
    }
    PARSE_ASSERT(0);
    return *(parse_node_t *)(NULL);  // unreachable
}

const parse_node_t *parse_node_tree_t::get_parent(const parse_node_t &node,
                                                  parse_token_type_t expected_type) const {
    const parse_node_t *result = NULL;
    if (node.parent != NODE_OFFSET_INVALID) {
        PARSE_ASSERT(node.parent < this->size());
        const parse_node_t &parent = this->at(node.parent);
        if (expected_type == token_type_invalid || expected_type == parent.type) {
            // The type matches (or no type was requested).
            result = &parent;
        }
    }
    return result;
}

static void find_nodes_recursive(const parse_node_tree_t &tree, const parse_node_t &parent,
                                 parse_token_type_t type,
                                 parse_node_tree_t::parse_node_list_t *result, size_t max_count) {
    if (result->size() < max_count) {
        if (parent.type == type) result->push_back(&parent);
        for (node_offset_t i = 0; i < parent.child_count; i++) {
            const parse_node_t *child = tree.get_child(parent, i);
            assert(child != NULL);
            find_nodes_recursive(tree, *child, type, result, max_count);
        }
    }
}

parse_node_tree_t::parse_node_list_t parse_node_tree_t::find_nodes(const parse_node_t &parent,
                                                                   parse_token_type_t type,
                                                                   size_t max_count) const {
    parse_node_list_t result;
    find_nodes_recursive(*this, parent, type, &result, max_count);
    return result;
}

/// Return true if the given node has the proposed ancestor as an ancestor (or is itself that
/// ancestor).
static bool node_has_ancestor(const parse_node_tree_t &tree, const parse_node_t &node,
                              const parse_node_t &proposed_ancestor) {
    if (&node == &proposed_ancestor) {
        return true;  // found it
    } else if (node.parent == NODE_OFFSET_INVALID) {
        return false;  // no more parents
    }

    // Recurse to the parent.
    return node_has_ancestor(tree, tree.at(node.parent), proposed_ancestor);
}

const parse_node_t *parse_node_tree_t::find_last_node_of_type(parse_token_type_t type,
                                                              const parse_node_t *parent) const {
    const parse_node_t *result = NULL;
    // Find nodes of the given type in the tree, working backwards.
    size_t idx = this->size();
    while (idx--) {
        const parse_node_t &node = this->at(idx);
        bool type_matches = (node.type == type);
        if (type_matches && (parent == NULL || node_has_ancestor(*this, node, *parent))) {
            // The types match and it has the right parent.
            result = &node;
            break;
        }
    }
    return result;
}

const parse_node_t *parse_node_tree_t::find_node_matching_source_location(
    parse_token_type_t type, size_t source_loc, const parse_node_t *parent) const {
    const parse_node_t *result = NULL;
    // Scan forwards and return the first node of the given type that covers the location.
    const size_t len = this->size();
    for (size_t idx = 0; idx < len && result == NULL; idx++) {
        const parse_node_t &node = this->at(idx);

        // Types must match.
        if (node.type != type) continue;

        // Must contain the source location.
        if (!node.location_in_or_at_end_of_source_range(source_loc)) continue;

        // If a parent is given, it must be an ancestor.
        if (parent != NULL && !node_has_ancestor(*this, node, *parent)) continue;

        // Found it.
        result = &node;
    }

    return result;
}

bool parse_node_tree_t::argument_list_is_root(const parse_node_t &node) const {
    bool result = true;
    assert(node.type == symbol_argument_list || node.type == symbol_arguments_or_redirections_list);
    const parse_node_t *parent = this->get_parent(node);
    if (parent != NULL) {
        // We have a parent - check to make sure it's not another list!
        result = parent->type != symbol_arguments_or_redirections_list &&
                 parent->type != symbol_argument_list;
    }
    return result;
}

enum parse_statement_decoration_t parse_node_tree_t::decoration_for_plain_statement(
    const parse_node_t &node) const {
    assert(node.type == symbol_plain_statement);
    const parse_node_t *decorated_statement = this->get_parent(node, symbol_decorated_statement);
    parse_node_tag_t tag =
        decorated_statement ? decorated_statement->tag : parse_statement_decoration_none;
    return static_cast<parse_statement_decoration_t>(tag);
}
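
// For example, in "command ls" the decorated_statement parent is tagged
// parse_statement_decoration_command, which is what this returns; a bare "ls"
// yields parse_statement_decoration_none.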

bool parse_node_tree_t::command_for_plain_statement(const parse_node_t &node, const wcstring &src,
                                                    wcstring *out_cmd) const {
    bool result = false;
    assert(node.type == symbol_plain_statement);
    const parse_node_t *cmd_node = this->get_child(node, 0, parse_token_type_string);
    if (cmd_node != NULL && cmd_node->has_source()) {
        out_cmd->assign(src, cmd_node->source_start, cmd_node->source_length);
        result = true;
    } else {
        out_cmd->clear();
    }
    return result;
}
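
// Disabled usage sketch: recovering the command of a plain statement (tree, node,
// and src are hypothetical stand-ins for a parsed tree, a plain_statement node in
// it, and the original source text):
#if 0
wcstring cmd;
if (tree.command_for_plain_statement(node, src, &cmd)) {
    // For the statement "grep foo", cmd now holds L"grep".
}
#endif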

bool parse_node_tree_t::statement_is_in_pipeline(const parse_node_t &node,
                                                 bool include_first) const {
    // Moderately nasty hack! Walk up our ancestor chain and see if we are in a job_continuation.
    // This checks if we are in the second or greater element in a pipeline; if we are the first
    // element we treat this as false. This accepts a few statement types.
    bool result = false;
    const parse_node_t *ancestor = &node;

    // If we're given a plain statement, try to get its decorated statement parent.
    if (ancestor && ancestor->type == symbol_plain_statement)
        ancestor = this->get_parent(*ancestor, symbol_decorated_statement);
    if (ancestor) ancestor = this->get_parent(*ancestor, symbol_statement);
    if (ancestor) ancestor = this->get_parent(*ancestor);

    if (ancestor) {
        if (ancestor->type == symbol_job_continuation) {
            // Second or more in a pipeline.
            result = true;
        } else if (ancestor->type == symbol_job && include_first) {
            // Check to see if we have a job continuation that's not empty.
            const parse_node_t *continuation =
                this->get_child(*ancestor, 1, symbol_job_continuation);
            result = (continuation != NULL && continuation->child_count > 0);
        }
    }

    return result;
}
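
// Example: in "a | b | c", the statements for b and c sit inside a job_continuation,
// so this returns true for them regardless of include_first; for a, it returns true
// only when include_first is set, because its job has a non-empty continuation.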

enum token_type parse_node_tree_t::type_for_redirection(const parse_node_t &redirection_node,
                                                        const wcstring &src, int *out_fd,
                                                        wcstring *out_target) const {
    assert(redirection_node.type == symbol_redirection);
    enum token_type result = TOK_NONE;
    const parse_node_t *redirection_primitive =
        this->get_child(redirection_node, 0, parse_token_type_redirection);  // like 2>
    const parse_node_t *redirection_target =
        this->get_child(redirection_node, 1, parse_token_type_string);  // like &1 or file path

    if (redirection_primitive != NULL && redirection_primitive->has_source()) {
        result = redirection_type_for_string(redirection_primitive->get_source(src), out_fd);
    }
    if (out_target != NULL) {
        *out_target = redirection_target ? redirection_target->get_source(src) : L"";
    }
    return result;
}
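
// Example: for the redirection "2>errors.txt", *out_fd is set to 2, the returned
// token type is the tokenizer's output-redirection type, and *out_target receives
// L"errors.txt".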

const parse_node_t *parse_node_tree_t::header_node_for_block_statement(
    const parse_node_t &node) const {
    const parse_node_t *result = NULL;
    if (node.type == symbol_block_statement) {
        const parse_node_t *block_header = this->get_child(node, 0, symbol_block_header);
        if (block_header != NULL) {
            result = this->get_child(*block_header, 0);
        }
    }
    return result;
}

parse_node_tree_t::parse_node_list_t parse_node_tree_t::specific_statements_for_job(
    const parse_node_t &job) const {
    assert(job.type == symbol_job);
    parse_node_list_t result;

    // Initial statement (non-specific).
    result.push_back(get_child(job, 0, symbol_statement));

    // Our cursor variable. Walk over the list of continuations.
    const parse_node_t *continuation = get_child(job, 1, symbol_job_continuation);
    while (continuation != NULL && continuation->child_count > 0) {
        result.push_back(get_child(*continuation, 1, symbol_statement));
        continuation = get_child(*continuation, 2, symbol_job_continuation);
    }

    // Result now contains a list of statements. But we want a list of specific statements e.g.
    // symbol_switch_statement. So replace them in-place in the vector.
    for (size_t i = 0; i < result.size(); i++) {
        const parse_node_t *statement = result.at(i);
        assert(statement->type == symbol_statement);
        result.at(i) = this->get_child(*statement, 0);
    }

    return result;
}
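
// Example: for the job "echo a | cat", the returned list holds the two specific
// statement nodes (here both symbol_decorated_statement) rather than their generic
// symbol_statement wrappers.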

parse_node_tree_t::parse_node_list_t parse_node_tree_t::comment_nodes_for_node(
    const parse_node_t &parent) const {
    parse_node_list_t result;
    if (parent.has_comments()) {
        // Walk all our nodes, looking for comment nodes that have the given node as a parent.
        for (size_t i = 0; i < this->size(); i++) {
            const parse_node_t &potential_comment = this->at(i);
            if (potential_comment.type == parse_special_type_comment &&
                this->get_parent(potential_comment) == &parent) {
                result.push_back(&potential_comment);
            }
        }
    }
    return result;
}

enum parse_bool_statement_type_t parse_node_tree_t::statement_boolean_type(
    const parse_node_t &node) {
    assert(node.type == symbol_boolean_statement);
    return static_cast<parse_bool_statement_type_t>(node.tag);
}

bool parse_node_tree_t::job_should_be_backgrounded(const parse_node_t &job) const {
    assert(job.type == symbol_job);
    const parse_node_t *opt_background = get_child(job, 2, symbol_optional_background);
    return opt_background != NULL && opt_background->tag == parse_background;
}

const parse_node_t *parse_node_tree_t::next_node_in_node_list(
    const parse_node_t &node_list, parse_token_type_t entry_type,
    const parse_node_t **out_list_tail) const {
    parse_token_type_t list_type = node_list.type;

    // Paranoia - it doesn't make sense for a list type to contain itself.
    assert(list_type != entry_type);

    const parse_node_t *list_cursor = &node_list;
    const parse_node_t *list_entry = NULL;

    // Loop while we don't have an item but do have a list. Note that some nodes may contain
    // nothing; e.g. job_list contains blank lines as a production.
    while (list_entry == NULL && list_cursor != NULL) {
        const parse_node_t *next_cursor = NULL;

        // Walk through the children.
        for (node_offset_t i = 0; i < list_cursor->child_count; i++) {
            const parse_node_t *child = this->get_child(*list_cursor, i);
            if (child->type == entry_type) {
                // This is the list entry.
                list_entry = child;
            } else if (child->type == list_type) {
                // This is the next in the list.
                next_cursor = child;
            }
        }
        // Go to the next entry, even if it's NULL.
        list_cursor = next_cursor;
    }

    // Return what we got.
    assert(list_cursor == NULL || list_cursor->type == list_type);
    assert(list_entry == NULL || list_entry->type == entry_type);
    if (out_list_tail != NULL) *out_list_tail = list_cursor;
    return list_entry;
}
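
// Disabled sketch: walking every job in a job_list with this helper (tree and
// job_list_node are hypothetical stand-ins for a parsed tree and its root list):
#if 0
const parse_node_t *list = &job_list_node;
while (list != NULL) {
    const parse_node_t *job = tree.next_node_in_node_list(*list, symbol_job, &list);
    if (job != NULL) {
        // Process *job here.
    }
}
#endif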