diff --git a/complete.cpp b/complete.cpp
index 78282cc7a..c7d12f815 100644
--- a/complete.cpp
+++ b/complete.cpp
@@ -1556,8 +1556,9 @@ static int try_complete_user( const wchar_t *cmd,
 void complete( const wchar_t *cmd,
                std::vector<completion_t> &comp )
 {
+
     const wchar_t *tok_begin, *tok_end, *cmdsubst_begin, *cmdsubst_end, *prev_begin, *prev_end;
-    wchar_t *buff;
+    wcstring buff;
     tokenizer tok;
     const wchar_t *current_token=0, *current_command=0, *prev_token=0;
     int on_command=0;
@@ -1601,10 +1602,7 @@ void complete( const wchar_t *cmd,
     {
         pos = cursor_pos-(cmdsubst_begin-cmd);
 
-        buff = wcsndup( cmdsubst_begin, cmdsubst_end-cmdsubst_begin );
-
-        if( !buff )
-            done=1;
+        buff = wcstring( cmdsubst_begin, cmdsubst_end-cmdsubst_begin );
     }
 
     if( !done )
@@ -1612,7 +1610,7 @@ void complete( const wchar_t *cmd,
     {
         int had_cmd=0;
         int end_loop=0;
-        tok_init( &tok, buff, TOK_ACCEPT_UNFINISHED );
+        tok_init( &tok, buff.c_str(), TOK_ACCEPT_UNFINISHED );
 
         while( tok_has_next( &tok) && !end_loop )
         {
@@ -1623,21 +1621,21 @@ void complete( const wchar_t *cmd,
 
                 case TOK_STRING:
                 {
-                    wchar_t *ncmd = tok_last( &tok );
-                    int is_ddash = (wcscmp( ncmd, L"--" ) == 0) && ( (tok_get_pos( &tok )+2) < pos );
+                    const wcstring ncmd = tok_last( &tok );
+                    int is_ddash = (ncmd == L"--") && ( (tok_get_pos( &tok )+2) < pos );
 
                     if( !had_cmd )
                     {
 
                         if( parser_keywords_is_subcommand( ncmd ) )
                         {
-                            if( wcscmp( ncmd, L"builtin" )==0)
+                            if (ncmd == L"builtin" )
                             {
                                 use_function = 0;
                                 use_command = 0;
                                 use_builtin = 1;
                             }
-                            else if( wcscmp( ncmd, L"command" )==0)
+                            else if (ncmd == L"command")
                             {
                                 use_command = 1;
                                 use_function = 0;
@@ -1653,9 +1651,9 @@ void complete( const wchar_t *cmd,
                             int token_end;
 
                             free( (void *)current_command );
-                            current_command = wcsdup( ncmd );
+                            current_command = wcsdup( ncmd.c_str() );
 
-                            token_end = tok_get_pos( &tok ) + wcslen( ncmd );
+                            token_end = tok_get_pos( &tok ) + ncmd.size();
 
                             on_command = (pos <= token_end );
                             had_cmd=1;
@@ -1703,7 +1701,6 @@ void complete( const wchar_t *cmd,
     }
 
     tok_destroy( &tok );
-    free( buff );
 
     /*
       Get the string to complete
diff --git a/parser.cpp b/parser.cpp
index 5a287e7cd..1349c6d17 100644
--- a/parser.cpp
+++ b/parser.cpp
@@ -1054,9 +1054,9 @@ const wchar_t *parser_t::current_line()
     int lineno=1;
 
     const wchar_t *file;
-    wchar_t *whole_str;
-    wchar_t *line;
-    wchar_t *line_end;
+    const wchar_t *whole_str;
+    const wchar_t *line;
+    const wchar_t *line_end;
     int i;
     int offset;
     int current_line_width;
@@ -1158,7 +1158,7 @@ const wchar_t *parser_t::current_line()
         }
     }
 
-    free( line );
+    free( (void *)line );
 
     parser_t::stack_trace( current_block, lineinfo );
     return lineinfo.c_str();
@@ -1653,7 +1653,7 @@ int parser_t::parse_job( process_t *p,
 
             case TOK_PIPE:
             {
-                wchar_t *str = tok_string( tok );
+                const wchar_t *str = tok_string( tok );
                 if( tok_get_pos(tok)>0 && str[tok_get_pos(tok)-1] == L'|' )
                 {
                     error( SYNTAX_ERROR,
@@ -2333,7 +2333,7 @@ void parser_t::eval_job( tokenizer *tok )
 
         case TOK_BACKGROUND:
        {
-            wchar_t *str = tok_string( tok );
+            const wchar_t *str = tok_string( tok );
             if( tok_get_pos(tok)>0 && str[tok_get_pos(tok)-1] == L'&' )
             {
                 error( SYNTAX_ERROR,
diff --git a/tokenizer.cpp b/tokenizer.cpp
index 7ea5491fa..f02f47e04 100644
--- a/tokenizer.cpp
+++ b/tokenizer.cpp
@@ -144,7 +144,7 @@ void tok_init( tokenizer *tok, const wchar_t *b, int flags )
 
     tok->has_next=1;
     tok->has_next = (*b != L'\0');
-    tok->orig_buff = tok->buff = (wchar_t *)(b);
+    tok->orig_buff = tok->buff = b;
 
     tok_next( tok );
 }
@@ -154,7 +154,7 @@ void tok_destroy( tokenizer *tok )
     free( tok->last );
 
     if( tok->free_orig )
-        free( tok->orig_buff );
+        free( (void *)tok->orig_buff );
 }
 
 int tok_last_type( tokenizer *tok )
@@ -624,7 +624,7 @@ void tok_next( tokenizer *tok )
     {
         if( iswdigit( *tok->buff ) )
         {
-            wchar_t *orig = tok->buff;
+            const wchar_t *orig = tok->buff;
             int fd = 0;
             while( iswdigit( *tok->buff ) )
                 fd = (fd*10) + (*(tok->buff++) - L'0');
@@ -646,7 +646,7 @@ void tok_next( tokenizer *tok )
 }
 
 
-wchar_t *tok_string( tokenizer *tok )
+const wchar_t *tok_string( tokenizer *tok )
 {
     return tok?tok->orig_buff:0;
 }
diff --git a/tokenizer.h b/tokenizer.h
index 731f49bf0..ff7c38e90 100644
--- a/tokenizer.h
+++ b/tokenizer.h
@@ -68,9 +68,9 @@ enum tokenizer_error
 struct tokenizer
 {
     /** A pointer into the original string, showing where the next token begins */
-    wchar_t *buff;
+    const wchar_t *buff;
     /** A copy of the original string */
-    wchar_t *orig_buff;
+    const wchar_t *orig_buff;
     /** A pointer to the last token*/
     wchar_t *last;
 
@@ -150,7 +150,7 @@ void tok_destroy( tokenizer *tok );
 /**
    Returns the original string to tokenizer
  */
-wchar_t *tok_string( tokenizer *tok );
+const wchar_t *tok_string( tokenizer *tok );
 
 /**
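
Not part of the patch itself: the changes above replace complete()'s wcsndup()/free() buffer with a caller-owned wcstring and const-qualify every tokenizer pointer into that buffer, which is why tok_init() can now accept buff.c_str() without a cast and tok_destroy() only frees orig_buff when free_orig is set. A minimal standalone sketch of that ownership pattern is below; mini_tok and mini_tok_init are made-up names for illustration, not fish's actual tokenizer API.

// Illustrative sketch only; mini_tok / mini_tok_init are hypothetical names.
#include <cwchar>
#include <string>

typedef std::wstring wcstring;        // fish defines wcstring as a std::wstring typedef

struct mini_tok
{
    const wchar_t *buff;              // cursor into the caller's buffer; never freed here
    const wchar_t *orig_buff;         // start of the caller's buffer
};

// A const wchar_t * parameter lets callers pass wcstring::c_str() directly,
// with no (wchar_t *) cast and no matching free() afterwards.
static void mini_tok_init( mini_tok *tok, const wchar_t *b )
{
    tok->orig_buff = tok->buff = b;
}

int main()
{
    // The wcstring owns the storage for as long as the tokenizer reads it,
    // standing in for the wcsndup()/free() pair complete() used before.
    wcstring buff( L"echo (pwd)", 4 );        // a 4-character slice, as in the cmdsubst case
    mini_tok tok;
    mini_tok_init( &tok, buff.c_str() );
    std::wprintf( L"%ls\n", tok.orig_buff );  // prints "echo"
    return 0;
}

Since the tokenizer only ever reads the caller's string, the const qualifiers document that no ownership is transferred; the casts to (void *) in the real patch exist only to keep free() legal on pointers that are now declared const.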