diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 05fc85533..6c7f6700b 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -807,6 +807,7 @@ bool move_word_state_machine_t::consume_char_path_components(wchar_t c) {
 }
 
 bool move_word_state_machine_t::consume_char_whitespace(wchar_t c) {
+    // Consume a "word" of printable characters plus any leading whitespace.
     enum { s_always_one = 0, s_blank, s_graph, s_end };
 
     bool consumed = false;
@@ -814,7 +815,13 @@ bool move_word_state_machine_t::consume_char_whitespace(wchar_t c) {
         switch (state) {
             case s_always_one: {
                 consumed = true;  // always consume the first character
-                state = s_blank;
+                // If it's a graphical char, only consume those from here.
+                if (iswgraph(c)) {
+                    state = s_graph;
+                } else {
+                    // If it's whitespace, keep consuming whitespace until the graphs.
+                    state = s_blank;
+                }
                 break;
             }
             case s_blank: {
diff --git a/tests/pexpects/bind.py b/tests/pexpects/bind.py
index 1f331c6eb..660581807 100644
--- a/tests/pexpects/bind.py
+++ b/tests/pexpects/bind.py
@@ -276,3 +276,16 @@ sendline("bind q self-insert-notfirst")
 expect_prompt()
 sendline("qqqecho qqq")
 expect_prompt("qqq", unmatched="Leading qs not stripped")
+
+# Test bigword with single-character words.
+sendline("bind \cg kill-bigword")
+expect_prompt()
+send("a b c d\x01")  # ctrl-a, move back to the beginning of the line
+send("\x07")  # ctrl-g, kill bigword
+sendline("echo")
+expect_prompt("^b c d")
+
+send(" a b c d\x01")  # ctrl-a, move back to the beginning of the line
+send("\x07")  # ctrl-g, kill bigword
+sendline("echo")
+expect_prompt("^b c d")