2016-05-04 05:35:12 +08:00
|
|
|
// A specialized tokenizer for tokenizing the fish language. In the future, the tokenizer should be
|
|
|
|
// extended to support marks, tokenizing multiple strings and disposing of unused string segments.
|
2005-10-04 23:11:39 +08:00
|
|
|
#ifndef FISH_TOKENIZER_H
|
|
|
|
#define FISH_TOKENIZER_H
|
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
#include <stddef.h>
|
2016-04-21 14:00:54 +08:00
|
|
|
|
2012-11-22 14:09:35 +08:00
|
|
|
#include "common.h"
|
2018-02-24 06:30:15 +08:00
|
|
|
#include "maybe.h"
|
2018-03-12 08:36:10 +08:00
|
|
|
#include "parse_constants.h"
|
2019-12-13 08:44:24 +08:00
|
|
|
#include "redirection.h"
|
2005-10-04 23:11:39 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Token types. Each tok_t produced by tokenizer_t::next() carries one of these.
enum class token_type_t {
    error,       ///< Error reading token
    string,      ///< String token
    pipe,        ///< Pipe token
    andand,      ///< && token
    oror,        ///< || token
    end,         ///< End token (semicolon or newline, not literal end)
    redirect,    ///< redirection token
    background,  ///< send job to bg token
    comment,     ///< comment token
};
|
2006-10-07 08:56:25 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Type of the flag bitmask accepted by the tokenizer_t constructor. Combine the TOK_* constants
/// below with bitwise-or.
typedef unsigned int tok_flags_t;

/// Flag telling the tokenizer to accept incomplete parameters, i.e. parameters with mismatching
/// parenthesis, etc. This is useful for tab-completion.
constexpr tok_flags_t TOK_ACCEPT_UNFINISHED = 1;

/// Flag telling the tokenizer not to remove comments. Useful for syntax highlighting.
constexpr tok_flags_t TOK_SHOW_COMMENTS = 2;

/// Ordinarily, the tokenizer ignores newlines following a newline, or a semicolon. This flag tells
/// the tokenizer to return each of them as a separate END.
constexpr tok_flags_t TOK_SHOW_BLANK_LINES = 4;

/// Make an effort to continue after an error.
constexpr tok_flags_t TOK_CONTINUE_AFTER_ERROR = 8;
|
2005-09-20 21:26:39 +08:00
|
|
|
|
2018-09-28 09:25:49 +08:00
|
|
|
/// Errors the tokenizer can report. Stored in tok_t::error; a human-readable message for each
/// value is available via tokenizer_get_error_message().
enum class tokenizer_error_t {
    none,                          ///< No error.
    unterminated_quote,            ///< Quote without a matching close quote.
    unterminated_subshell,         ///< Subshell without a matching close paren.
    unterminated_slice,            ///< Slice without a matching close bracket.
    unterminated_escape,           ///< Escape at the end of input.
    invalid_redirect,              ///< Malformed redirection.
    invalid_pipe,                  ///< Malformed pipe.
    invalid_pipe_ampersand,        ///< Malformed pipe-with-ampersand form.
    closing_unopened_subshell,     ///< Close paren with no matching open paren.
    illegal_slice,                 ///< Slice not permitted here.
    closing_unopened_brace,        ///< Close brace with no matching open brace.
    unterminated_brace,            ///< Brace without a matching close brace.
    expected_pclose_found_bclose,  ///< Expected ')' but found '}'.
    expected_bclose_found_pclose,  ///< Expected '}' but found ')'.
};
|
|
|
|
|
|
|
|
/// Get the error message for an error \p err.
|
2019-03-15 06:12:14 +08:00
|
|
|
const wchar_t *tokenizer_get_error_message(tokenizer_error_t err);
|
2018-09-28 09:25:49 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
struct tok_t {
|
|
|
|
// The type of the token.
|
2019-10-14 07:06:16 +08:00
|
|
|
token_type_t type;
|
2018-02-24 06:30:15 +08:00
|
|
|
|
|
|
|
// Offset of the token.
|
|
|
|
size_t offset{0};
|
|
|
|
// Length of the token.
|
|
|
|
size_t length{0};
|
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
// If an error, this is the error code.
|
2018-09-28 09:25:49 +08:00
|
|
|
tokenizer_error_t error{tokenizer_error_t::none};
|
2018-02-24 09:28:12 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
// If an error, this is the offset of the error within the token. A value of 0 means it occurred
|
|
|
|
// at 'offset'.
|
2019-11-09 08:55:54 +08:00
|
|
|
size_t error_offset_within_token{size_t(-1)};
|
2016-05-04 05:35:12 +08:00
|
|
|
|
2019-10-14 07:06:16 +08:00
|
|
|
// Construct from a token type.
|
|
|
|
explicit tok_t(token_type_t type);
|
2019-10-29 20:32:26 +08:00
|
|
|
|
|
|
|
/// Returns whether the given location is within the source range or at its end.
|
|
|
|
bool location_in_or_at_end_of_source_range(size_t loc) const {
|
|
|
|
return offset <= loc && loc - offset <= length;
|
|
|
|
}
|
|
|
|
/// Gets source for the token, or the empty string if it has no source.
|
2020-09-27 08:21:22 +08:00
|
|
|
wcstring get_source(const wcstring &str) const { return wcstring(str, offset, length); }
|
2015-07-26 14:05:47 +08:00
|
|
|
};
|
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
/// The tokenizer struct.
|
2021-07-23 01:43:25 +08:00
|
|
|
class tokenizer_t : noncopyable_t {
|
2016-05-04 05:35:12 +08:00
|
|
|
/// A pointer into the original string, showing where the next token begins.
|
2019-11-09 08:40:15 +08:00
|
|
|
const wchar_t *token_cursor;
|
2018-02-20 07:10:10 +08:00
|
|
|
/// The start of the original string.
|
|
|
|
const wchar_t *const start;
|
2018-02-24 06:30:15 +08:00
|
|
|
/// Whether we have additional tokens.
|
2018-02-20 07:10:10 +08:00
|
|
|
bool has_next{true};
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Whether incomplete tokens are accepted.
|
2018-02-20 07:10:10 +08:00
|
|
|
bool accept_unfinished{false};
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Whether comments should be returned.
|
2018-02-20 07:10:10 +08:00
|
|
|
bool show_comments{false};
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Whether all blank lines are returned.
|
2018-02-20 07:10:10 +08:00
|
|
|
bool show_blank_lines{false};
|
2019-10-28 07:08:49 +08:00
|
|
|
/// Whether to attempt to continue after an error.
|
|
|
|
bool continue_after_error{false};
|
2018-03-12 21:35:09 +08:00
|
|
|
/// Whether to continue the previous line after the comment.
|
|
|
|
bool continue_line_after_comment{false};
|
2016-05-04 05:35:12 +08:00
|
|
|
|
2018-09-28 09:25:49 +08:00
|
|
|
tok_t call_error(tokenizer_error_t error_type, const wchar_t *token_start,
|
2019-10-28 06:44:08 +08:00
|
|
|
const wchar_t *error_loc, maybe_t<size_t> token_length = {});
|
2018-02-24 06:30:15 +08:00
|
|
|
tok_t read_string();
|
2016-05-04 05:35:12 +08:00
|
|
|
|
|
|
|
public:
|
|
|
|
/// Constructor for a tokenizer. b is the string that is to be tokenized. It is not copied, and
|
|
|
|
/// should not be freed by the caller until after the tokenizer is destroyed.
|
|
|
|
///
|
|
|
|
/// \param b The string to tokenize
|
|
|
|
/// \param flags Flags to the tokenizer. Setting TOK_ACCEPT_UNFINISHED will cause the tokenizer
|
|
|
|
/// to accept incomplete tokens, such as a subshell without a closing parenthesis, as a valid
|
|
|
|
/// token. Setting TOK_SHOW_COMMENTS will return comments as tokens
|
2019-11-19 08:54:36 +08:00
|
|
|
tokenizer_t(const wchar_t *start, tok_flags_t flags);
|
2016-05-04 05:35:12 +08:00
|
|
|
|
2019-10-14 07:06:16 +08:00
|
|
|
/// Returns the next token, or none() if we are at the end.
|
|
|
|
maybe_t<tok_t> next();
|
2018-02-24 06:30:15 +08:00
|
|
|
|
|
|
|
/// Returns the text of a token, as a string.
|
|
|
|
wcstring text_of(const tok_t &tok) const { return wcstring(start + tok.offset, tok.length); }
|
2018-02-24 07:58:13 +08:00
|
|
|
|
|
|
|
/// Copies a token's text into a string. This is useful for reusing storage.
|
|
|
|
/// Returns a reference to the string.
|
|
|
|
const wcstring ©_text_of(const tok_t &tok, wcstring *result) {
|
|
|
|
return result->assign(start + tok.offset, tok.length);
|
|
|
|
}
|
2012-11-22 09:48:35 +08:00
|
|
|
};
|
2005-09-20 21:26:39 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Returns only the first token from the specified string. This is a convenience function, used to
|
|
|
|
/// retrieve the first token of a string. This can be useful for error messages, etc. On failure,
|
|
|
|
/// returns the empty string.
|
2015-07-26 15:58:32 +08:00
|
|
|
wcstring tok_first(const wcstring &str);
|
2005-09-20 21:26:39 +08:00
|
|
|
|
2020-02-24 07:16:12 +08:00
|
|
|
/// Like tok_first, but skips variable assignments like A=B.
|
|
|
|
wcstring tok_command(const wcstring &str);
|
|
|
|
|
2019-10-15 04:20:31 +08:00
|
|
|
/// Struct wrapping up a parsed pipe or redirection.
|
|
|
|
struct pipe_or_redir_t {
|
|
|
|
// The redirected fd, or -1 on overflow.
|
2019-12-11 08:14:34 +08:00
|
|
|
// In the common case of a pipe, this is 1 (STDOUT_FILENO).
|
2019-10-15 04:20:31 +08:00
|
|
|
// For example, in the case of "3>&1" this will be 3.
|
2019-12-11 08:14:34 +08:00
|
|
|
int fd{-1};
|
2013-12-24 06:53:56 +08:00
|
|
|
|
2019-10-15 04:20:31 +08:00
|
|
|
// Whether we are a pipe (true) or redirection (false).
|
|
|
|
bool is_pipe{false};
|
2013-12-29 08:18:38 +08:00
|
|
|
|
2019-10-15 04:20:31 +08:00
|
|
|
// The redirection mode if the type is redirect.
|
|
|
|
// Ignored for pipes.
|
|
|
|
redirection_mode_t mode{redirection_mode_t::overwrite};
|
|
|
|
|
2019-10-15 06:45:40 +08:00
|
|
|
// Whether, in addition to this redirection, stderr should also be dup'd to stdout
|
|
|
|
// For example &| or &>
|
|
|
|
bool stderr_merge{false};
|
|
|
|
|
2019-10-15 04:20:31 +08:00
|
|
|
// Number of characters consumed when parsing the string.
|
|
|
|
size_t consumed{0};
|
|
|
|
|
|
|
|
// Construct from a string.
|
|
|
|
static maybe_t<pipe_or_redir_t> from_string(const wchar_t *buff);
|
|
|
|
static maybe_t<pipe_or_redir_t> from_string(const wcstring &buff) {
|
|
|
|
return from_string(buff.c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
// \return the oflags (as in open(2)) for this redirection.
|
|
|
|
int oflags() const;
|
|
|
|
|
|
|
|
// \return if we are "valid". Here "valid" means only that the source fd did not overflow.
|
|
|
|
// For example 99999999999> is invalid.
|
|
|
|
bool is_valid() const { return fd >= 0; }
|
|
|
|
|
|
|
|
// \return the token type for this redirection.
|
|
|
|
token_type_t token_type() const {
|
|
|
|
return is_pipe ? token_type_t::pipe : token_type_t::redirect;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
pipe_or_redir_t();
|
|
|
|
};
|
2013-10-14 07:58:40 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Styles of word movement used by move_word_state_machine_t below.
/// (Plain enum, not enum class: the enumerator names are used unscoped elsewhere.)
enum move_word_style_t {
    move_word_style_punctuation,      // stop at punctuation
    move_word_style_path_components,  // stops at path components
    move_word_style_whitespace        // stops at whitespace
};
|
2006-10-07 08:56:25 +08:00
|
|
|
|
2016-05-04 05:35:12 +08:00
|
|
|
/// Our state machine that implements "one word" movement or erasure.
class move_word_state_machine_t {
   private:
    // Style-specific character consumers (implemented out of line). Presumably each reports
    // whether \p c should still be consumed as part of the current word -- confirm in the
    // implementation file.
    bool consume_char_punctuation(wchar_t c);
    bool consume_char_path_components(wchar_t c);
    bool is_path_component_character(wchar_t c);
    bool consume_char_whitespace(wchar_t c);

    // Current state of the machine.
    int state;
    // The movement style this machine was constructed with.
    move_word_style_t style;

   public:
    // Construct a machine using the given movement style.
    explicit move_word_state_machine_t(move_word_style_t syl);
    // Feed one character to the machine.
    bool consume_char(wchar_t c);
    // Return the machine to its initial state so it can be reused.
    void reset();
};
|
|
|
|
|
2020-02-24 07:14:39 +08:00
|
|
|
/// The position of the equal sign in a variable assignment like foo=bar.
|
|
|
|
maybe_t<size_t> variable_assignment_equals_pos(const wcstring &txt);
|
|
|
|
|
2005-10-04 23:11:39 +08:00
|
|
|
#endif
|