update: normed stuff
This commit is contained in:
parent
71d9a201b5
commit
b058348d35
20 changed files with 388 additions and 193 deletions
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/05 18:56:12 by maiboyer ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:51:52 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -26,50 +26,53 @@ void push_token_and_create_new_chr(\
|
|||
void push_token_and_set_new_chr(\
|
||||
t_vec_token *tokens, t_token *tok, enum e_token ttype, char c);
|
||||
|
||||
static bool _dquote_inner2(t_token *ctok, t_token*out, char c)
|
||||
{
|
||||
if (me_isspace(c))
|
||||
{
|
||||
if (ctok->type == TOK_NONE)
|
||||
*ctok = token_new(TOK_WHITESPACE);
|
||||
if (ctok->type != TOK_WHITESPACE)
|
||||
{
|
||||
vec_token_push(&out->subtokens, *ctok);
|
||||
*ctok = token_new(TOK_WHITESPACE);
|
||||
}
|
||||
string_push_char(&ctok->string, c);
|
||||
}
|
||||
else if (c == '$')
|
||||
push_token_and_create_new(&out->subtokens, ctok, TOK_DOLLAR, "$");
|
||||
else if (c == '(')
|
||||
push_token_and_create_new(&out->subtokens, ctok, TOK_LPAREN, "(");
|
||||
else if (c == ')')
|
||||
push_token_and_create_new(&out->subtokens, ctok, TOK_RPAREN, ")");
|
||||
else if (!(me_isalnum(c) || c == '_'))
|
||||
push_token_and_create_new_chr(&out->subtokens, ctok, TOK_NALPHANUM, c);
|
||||
else
|
||||
return (false);
|
||||
return (true);
|
||||
}
|
||||
|
||||
t_error _parse_dquote_inner(t_token dquote, t_vec_token *append)
|
||||
{
|
||||
t_token ctok;
|
||||
t_token out;
|
||||
t_usize i;
|
||||
char c;
|
||||
|
||||
out = token_new_meta(TOK_DQUOTE);
|
||||
i = 0;
|
||||
ctok = token_new_none();
|
||||
while (dquote.string.buf[i] != '\0')
|
||||
{
|
||||
c = dquote.string.buf[i++];
|
||||
if (me_isspace(c))
|
||||
{
|
||||
if (ctok.type == TOK_NONE)
|
||||
ctok = token_new(TOK_WHITESPACE);
|
||||
if (ctok.type != TOK_WHITESPACE)
|
||||
{
|
||||
vec_token_push(&out.subtokens, ctok);
|
||||
ctok = token_new(TOK_WHITESPACE);
|
||||
}
|
||||
string_push_char(&ctok.string, c);
|
||||
}
|
||||
else if (c == '$')
|
||||
push_token_and_create_new(&out.subtokens, &ctok, TOK_DOLLAR, "$");
|
||||
else if (c == '(')
|
||||
push_token_and_create_new(&out.subtokens, &ctok, TOK_LPAREN, "(");
|
||||
else if (c == ')')
|
||||
push_token_and_create_new(&out.subtokens, &ctok, TOK_RPAREN, ")");
|
||||
else if (!(me_isalnum(c) || c == '_'))
|
||||
push_token_and_create_new_chr(&out.subtokens, &ctok, TOK_NALPHANUM, c);
|
||||
else
|
||||
if (!_dquote_inner2(&ctok, &out, dquote.string.buf[i++]))
|
||||
{
|
||||
if (ctok.type == TOK_NONE)
|
||||
ctok = token_new(TOK_NQUOTE);
|
||||
if (ctok.type != TOK_NQUOTE)
|
||||
{
|
||||
vec_token_push(&out.subtokens, ctok);
|
||||
ctok = token_new(TOK_NQUOTE);
|
||||
}
|
||||
string_push_char(&ctok.string, c);
|
||||
ctok = (vec_token_push(&out.subtokens, ctok), \
|
||||
token_new(TOK_NQUOTE));
|
||||
string_push_char(&ctok.string, dquote.string.buf[i - 1]);
|
||||
}
|
||||
};
|
||||
}
|
||||
if (ctok.type != TOK_NONE)
|
||||
vec_token_push(&out.subtokens, ctok);
|
||||
if (ts_dq_apply_passes(out.subtokens, &out.subtokens))
|
||||
|
|
@ -99,7 +102,7 @@ t_error ts_double_string_pass(t_vec_token input, t_vec_token *output)
|
|||
if (_parse_dquote_inner(input.buffer[i], &out))
|
||||
return (vec_token_free(input), ERROR);
|
||||
}
|
||||
else
|
||||
else
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
i++;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/04 19:09:45 by rparodi ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:42:09 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -38,7 +38,7 @@ t_error ts_double_amp(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_AMP
|
||||
&& input.buffer[i + 1].type == TOK_AMP)
|
||||
&& input.buffer[i + 1].type == TOK_AMP)
|
||||
{
|
||||
vec_token_push(&out, token_new(TOK_AND));
|
||||
i++;
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/04 19:09:30 by rparodi ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:42:24 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -38,7 +38,7 @@ t_error ts_double_lcarret(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_LCARRET
|
||||
&& input.buffer[i + 1].type == TOK_LCARRET)
|
||||
&& input.buffer[i + 1].type == TOK_LCARRET)
|
||||
{
|
||||
vec_token_push(&out, token_new(TOK_DLCARRET));
|
||||
i++;
|
||||
|
|
@ -63,7 +63,7 @@ t_error ts_double_rcarret(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_RCARRET
|
||||
&& input.buffer[i + 1].type == TOK_RCARRET)
|
||||
&& input.buffer[i + 1].type == TOK_RCARRET)
|
||||
{
|
||||
vec_token_push(&out, token_new(TOK_DRCARRET));
|
||||
i++;
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/05 18:43:52 by maiboyer ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:40:36 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -16,6 +16,15 @@
|
|||
#include "me/vec/vec_token.h"
|
||||
#include "parser/token.h"
|
||||
|
||||
static void _fold_parens_helper(t_vec_token *v, enum e_token ty, t_const_str s)
|
||||
{
|
||||
t_token tmp;
|
||||
|
||||
tmp = token_new(ty);
|
||||
string_push(&tmp.string, s);
|
||||
vec_token_push(v, tmp);
|
||||
}
|
||||
|
||||
/// This is a sample pass
|
||||
///
|
||||
/// There is a few rules the rest of the tokenizer machinery assumes
|
||||
|
|
@ -31,7 +40,6 @@ t_error ts_double_lparen(t_vec_token input, t_vec_token *output)
|
|||
{
|
||||
t_vec_token out;
|
||||
t_usize i;
|
||||
t_token tmp;
|
||||
|
||||
i = 0;
|
||||
out = vec_token_new(input.len, token_free);
|
||||
|
|
@ -40,11 +48,9 @@ t_error ts_double_lparen(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_LPAREN
|
||||
&& input.buffer[i + 1].type == TOK_LPAREN)
|
||||
&& input.buffer[i + 1].type == TOK_LPAREN)
|
||||
{
|
||||
tmp = token_new(TOK_DLPAREN);
|
||||
string_push(&tmp.string, "((");
|
||||
vec_token_push(&out, tmp);
|
||||
_fold_parens_helper(&out, TOK_DLPAREN, "((");
|
||||
i++;
|
||||
}
|
||||
else
|
||||
|
|
@ -59,7 +65,6 @@ t_error ts_double_rparen(t_vec_token input, t_vec_token *output)
|
|||
{
|
||||
t_vec_token out;
|
||||
t_usize i;
|
||||
t_token tmp;
|
||||
|
||||
i = 0;
|
||||
out = vec_token_new(input.len, token_free);
|
||||
|
|
@ -68,11 +73,9 @@ t_error ts_double_rparen(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_RPAREN
|
||||
&& input.buffer[i + 1].type == TOK_RPAREN)
|
||||
&& input.buffer[i + 1].type == TOK_RPAREN)
|
||||
{
|
||||
tmp = token_new(TOK_DRPAREN);
|
||||
string_push(&tmp.string, "))");
|
||||
vec_token_push(&out, tmp);
|
||||
_fold_parens_helper(&out, TOK_DRPAREN, "))");
|
||||
i++;
|
||||
}
|
||||
else
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/04 19:08:48 by rparodi ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:42:02 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -38,7 +38,7 @@ t_error ts_double_pipe(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_PIPE
|
||||
&& input.buffer[i + 1].type == TOK_PIPE)
|
||||
&& input.buffer[i + 1].type == TOK_PIPE)
|
||||
{
|
||||
vec_token_push(&out, token_new(TOK_OR));
|
||||
i++;
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/05 13:19:33 by maiboyer ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:43:38 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
#include "parser/passes.h"
|
||||
#include "parser/token.h"
|
||||
|
||||
bool _can_be_varname(t_token *tok)
|
||||
bool _can_be_varname(t_token *tok)
|
||||
{
|
||||
if (tok->type == TOK_DOLLAR)
|
||||
return (true);
|
||||
|
|
@ -35,7 +35,7 @@ bool _can_be_varname(t_token *tok)
|
|||
/// thus it shouldn't be freed in case of error
|
||||
/// - the output tokens may not be direct copy of the input tokens,
|
||||
/// but need to be cloned (different allocations for stuff)
|
||||
t_error ts_fold_expension(t_vec_token input, t_vec_token *output)
|
||||
t_error ts_fold_expension(t_vec_token input, t_vec_token *output)
|
||||
{
|
||||
t_vec_token out;
|
||||
t_usize i;
|
||||
|
|
@ -49,10 +49,10 @@ t_error ts_fold_expension(t_vec_token input, t_vec_token *output)
|
|||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_DOLLAR \
|
||||
&& (input.buffer[i + 1].type == TOK_NQUOTE \
|
||||
|| _can_be_varname(&input.buffer[i+1])))
|
||||
|| _can_be_varname(&input.buffer[i + 1])))
|
||||
{
|
||||
tmp = token_clone(&input.buffer[++i]);
|
||||
tmp.type= TOK_EXPENSION;
|
||||
tmp.type = TOK_EXPENSION;
|
||||
vec_token_push(&out, tmp);
|
||||
}
|
||||
else
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/05 18:05:49 by maiboyer ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:41:08 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -42,7 +42,8 @@ t_error ts_fold_no_quote(t_vec_token input, t_vec_token *output)
|
|||
{
|
||||
j = 0;
|
||||
tmp = token_new(TOK_NQUOTE);
|
||||
while (i + j < input.len && token_is_noquote(input.buffer[i + j].type))
|
||||
while (i + j < input.len \
|
||||
&& token_is_noquote(input.buffer[i + j].type))
|
||||
string_push(&tmp.string, input.buffer[i + j++].string.buf);
|
||||
vec_token_push(&out, tmp);
|
||||
i += j;
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/05 18:02:25 by maiboyer ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:43:02 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -15,9 +15,12 @@
|
|||
#include "me/vec/vec_token.h"
|
||||
#include "parser/token.h"
|
||||
|
||||
bool _is_token_carret(enum e_token ttype)
|
||||
bool _is_token_carret(enum e_token ttype)
|
||||
{
|
||||
return (ttype == TOK_LCARRET || ttype == TOK_DLCARRET || ttype == TOK_RCARRET || ttype == TOK_DRCARRET);
|
||||
return (ttype == TOK_LCARRET \
|
||||
|| ttype == TOK_DLCARRET \
|
||||
|| ttype == TOK_RCARRET \
|
||||
|| ttype == TOK_DRCARRET);
|
||||
}
|
||||
|
||||
/// This is a sample pass
|
||||
|
|
@ -41,14 +44,18 @@ t_error ts_fold_redir(t_vec_token input, t_vec_token *output)
|
|||
out = vec_token_new(input.len, token_free);
|
||||
while (i < input.len)
|
||||
{
|
||||
if (vec_token_get(&input, i + 1) != NULL && _is_token_carret(vec_token_get(&input, i)->type) && vec_token_get(&input, i + 1)->type == TOK_WORD)
|
||||
if (vec_token_get(&input, i + 1) != NULL \
|
||||
&& _is_token_carret(vec_token_get(&input, i)->type) \
|
||||
&& vec_token_get(&input, i + 1)->type == TOK_WORD)
|
||||
{
|
||||
tmp = token_new_meta(TOK_REDIR);
|
||||
vec_token_push(&tmp.subtokens, token_clone(vec_token_get(&input, i++)));
|
||||
vec_token_push(&tmp.subtokens, token_clone(vec_token_get(&input, i++)));
|
||||
vec_token_push(&tmp.subtokens, \
|
||||
token_clone(vec_token_get(&input, i++)));
|
||||
vec_token_push(&tmp.subtokens, \
|
||||
token_clone(vec_token_get(&input, i++)));
|
||||
vec_token_push(&out, tmp);
|
||||
}
|
||||
else
|
||||
else
|
||||
vec_token_push(&out, token_clone(&input.buffer[i++]));
|
||||
}
|
||||
vec_token_free(input);
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/04 18:32:00 by maiboyer ### ########.fr */
|
||||
/* Updated: 2024/10/06 13:34:58 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
|
|
@ -38,7 +38,7 @@ t_error ts_fold_whitespace(t_vec_token input, t_vec_token *output)
|
|||
if (i + 1 >= input.len)
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
else if (input.buffer[i].type == TOK_WHITESPACE
|
||||
&& input.buffer[i + 1].type == TOK_WHITESPACE)
|
||||
&& input.buffer[i + 1].type == TOK_WHITESPACE)
|
||||
;
|
||||
else
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
|
|
|
|||
58
parser/src/passes/paren_to_nquote.c
Normal file
58
parser/src/passes/paren_to_nquote.c
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
/* ************************************************************************** */
|
||||
/* */
|
||||
/* ::: :::::::: */
|
||||
/* paren_to_nquote.c :+: :+: :+: */
|
||||
/* +:+ +:+ +:+ */
|
||||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/06 13:37:07 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
#include "me/string/string.h"
|
||||
#include "parser/passes.h"
|
||||
#include "me/types.h"
|
||||
#include "me/vec/vec_token.h"
|
||||
#include "parser/token.h"
|
||||
|
||||
static void _paren_to_nquote_helper(t_vec_token *v, char c)
|
||||
{
|
||||
t_token tmp;
|
||||
|
||||
tmp = token_new(TOK_NQUOTE);
|
||||
string_push_char(&tmp.string, c);
|
||||
vec_token_push(v, tmp);
|
||||
}
|
||||
|
||||
/// This is a sample pass
|
||||
///
|
||||
/// There is a few rules the rest of the tokenizer machinery assumes
|
||||
/// theses function follows:
|
||||
/// - the input vec WILL be freed when the function return, even in
|
||||
/// case of error
|
||||
/// - the output vector isn't populated if the function returns an error,
|
||||
/// thus it shouldn't be freed in case of error
|
||||
/// - the output tokens may not be direct copy of the input tokens,
|
||||
/// but need to be cloned (different allocations for stuff)
|
||||
|
||||
t_error ts_paren_to_noquote(t_vec_token input, t_vec_token *output)
|
||||
{
|
||||
t_vec_token out;
|
||||
t_usize i;
|
||||
|
||||
i = 0;
|
||||
out = vec_token_new(input.len, token_free);
|
||||
while (i < input.len)
|
||||
{
|
||||
if (input.buffer[i].type == TOK_LPAREN)
|
||||
_paren_to_nquote_helper(&out, '(');
|
||||
else if (input.buffer[i].type == TOK_RPAREN)
|
||||
_paren_to_nquote_helper(&out, ')');
|
||||
else
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
i++;
|
||||
}
|
||||
vec_token_free(input);
|
||||
return (*output = out, NO_ERROR);
|
||||
}
|
||||
64
parser/src/passes/split_double_paren.c
Normal file
64
parser/src/passes/split_double_paren.c
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
/* ************************************************************************** */
|
||||
/* */
|
||||
/* ::: :::::::: */
|
||||
/* split_double_paren.c :+: :+: :+: */
|
||||
/* +:+ +:+ +:+ */
|
||||
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
|
||||
/* +#+#+#+#+#+ +#+ */
|
||||
/* Created: 2024/10/02 19:04:32 by maiboyer #+# #+# */
|
||||
/* Updated: 2024/10/06 13:38:43 by maiboyer ### ########.fr */
|
||||
/* */
|
||||
/* ************************************************************************** */
|
||||
|
||||
#include "me/string/string.h"
|
||||
#include "parser/passes.h"
|
||||
#include "me/types.h"
|
||||
#include "me/vec/vec_token.h"
|
||||
#include "parser/token.h"
|
||||
|
||||
static void _split_parens_helper(t_vec_token *v, enum e_token ty, char c)
|
||||
{
|
||||
t_token tmp;
|
||||
|
||||
tmp = token_new(ty);
|
||||
string_push_char(&tmp.string, c);
|
||||
vec_token_push(v, tmp);
|
||||
}
|
||||
|
||||
/// This is a sample pass
|
||||
///
|
||||
/// There is a few rules the rest of the tokenizer machinery assumes
|
||||
/// theses function follows:
|
||||
/// - the input vec WILL be freed when the function return, even in
|
||||
/// case of error
|
||||
/// - the output vector isn't populated if the function returns an error,
|
||||
/// thus it shouldn't be freed in case of error
|
||||
/// - the output tokens may not be direct copy of the input tokens,
|
||||
/// but need to be cloned (different allocations for stuff)
|
||||
|
||||
t_error ts_split_paren(t_vec_token input, t_vec_token *output)
|
||||
{
|
||||
t_vec_token out;
|
||||
t_usize i;
|
||||
|
||||
i = 0;
|
||||
out = vec_token_new(input.len, token_free);
|
||||
while (i < input.len)
|
||||
{
|
||||
if (input.buffer[i].type == TOK_DLPAREN)
|
||||
{
|
||||
_split_parens_helper(&out, TOK_LPAREN, '(');
|
||||
_split_parens_helper(&out, TOK_LPAREN, '(');
|
||||
}
|
||||
else if (input.buffer[i].type == TOK_DRPAREN)
|
||||
{
|
||||
_split_parens_helper(&out, TOK_RPAREN, ')');
|
||||
_split_parens_helper(&out, TOK_RPAREN, ')');
|
||||
}
|
||||
else
|
||||
vec_token_push(&out, token_clone(&input.buffer[i]));
|
||||
i++;
|
||||
}
|
||||
vec_token_free(input);
|
||||
return (*output = out, NO_ERROR);
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue