update: exec works!

maix0 2024-10-10 17:41:23 +02:00
parent 2363fadd02
commit 77e7f65b41
24 changed files with 192 additions and 499 deletions

View file

@@ -6,7 +6,7 @@
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2024/10/02 18:41:16 by maiboyer #+# #+# */
/* Updated: 2024/10/10 15:27:12 by maiboyer ### ########.fr */
/* Updated: 2024/10/10 17:25:40 by maiboyer ### ########.fr */
/* */
/* ************************************************************************** */
@ -70,7 +70,6 @@ t_error ts_apply_passes(t_vec_token ts, t_vec_token *out)
else
me_printf("Applied '%s' pass\n", g_ts_passes[i].name);
ts = next;
ts_print(&ts);
i++;
}
return (*out = ts, NO_ERROR);
@@ -98,7 +97,6 @@ t_error ts_dq_apply_passes(t_vec_token ts, t_vec_token *out)
else
me_printf("Applied '%s' dq_pass\n", g_ts_dq_passes[i].name);
ts = next;
ts_print(&ts);
i++;
}
return (*out = ts, NO_ERROR);

View file

@@ -6,7 +6,7 @@
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2024/10/09 12:44:53 by maiboyer #+# #+# */
/* Updated: 2024/10/10 15:16:38 by maiboyer ### ########.fr */
/* Updated: 2024/10/10 17:19:30 by maiboyer ### ########.fr */
/* */
/* ************************************************************************** */
@@ -47,7 +47,7 @@ static t_error _create_ast_redir(enum e_token ty, t_ast_node *out)
return (*out = ret, NO_ERROR);
}
t_const_str _token_to_string(t_token *arg)
t_const_str _token_to_string(t_token *arg, bool dollar_exp)
{
t_usize i;
t_string s;
@@ -58,7 +58,7 @@ t_const_str _token_to_string(t_token *arg)
s = string_new(16);
if (arg->string.buf != NULL)
{
if (arg->type == TOK_EXPENSION)
if (dollar_exp && arg->type == TOK_EXPENSION)
string_push_char(&s, '$');
string_push(&s, arg->string.buf);
}
@@ -67,7 +67,7 @@ t_const_str _token_to_string(t_token *arg)
i = 0;
while (i < arg->subtokens.len)
{
tmp = _token_to_string(&arg->subtokens.buffer[i++]);
tmp = _token_to_string(&arg->subtokens.buffer[i++], false);
string_push(&s, tmp);
str_free((t_str)tmp);
}
@@ -80,7 +80,7 @@ static t_error _ast_set_redir_arg(t_ast_node node, t_token *arg)
if (node == NULL || arg == NULL || (node->kind != AST_HEREDOC_REDIRECTION && node->kind != AST_FILE_REDIRECTION))
return (ERROR);
if (node->kind == AST_HEREDOC_REDIRECTION)
node->data.heredoc_redirection.delimiter = (t_str)_token_to_string(arg);
node->data.heredoc_redirection.delimiter = (t_str)_token_to_string(arg, true);
else if (handle_tok_word_inner(arg, &node->data.file_redirection.output))
return (ERROR);
return (NO_ERROR);
@@ -104,7 +104,7 @@ t_error _tok_word_expansion(t_token *tok, t_ast_node *out)
t_ast_node ret;
ret = ast_alloc(AST_EXPANSION);
ret->data.expansion.var_name = (t_str)_token_to_string(tok);
ret->data.expansion.var_name = (t_str)_token_to_string(tok, false);
return (*out = ret, NO_ERROR);
}
t_error _tok_word_nquote(t_token *tok, t_ast_node *out)
@@ -115,7 +115,7 @@ t_error _tok_word_nquote(t_token *tok, t_ast_node *out)
ret = ast_alloc(AST_WORD);
ret->data.word.kind = AST_WORD_NO_QUOTE;
tmp = ast_alloc(AST_RAW_STRING);
tmp->data.raw_string.str = (t_str)_token_to_string(tok);
tmp->data.raw_string.str = (t_str)_token_to_string(tok, false);
vec_ast_push(&ret->data.word.inner, tmp);
return (*out = ret, NO_ERROR);
}
@@ -127,7 +127,7 @@ t_error _tok_word_squote(t_token *tok, t_ast_node *out)
ret = ast_alloc(AST_WORD);
ret->data.word.kind = AST_WORD_SINGLE_QUOTE;
tmp = ast_alloc(AST_RAW_STRING);
tmp->data.raw_string.str = (t_str)_token_to_string(tok);
tmp->data.raw_string.str = (t_str)_token_to_string(tok, false);
vec_ast_push(&ret->data.word.inner, tmp);
return (*out = ret, NO_ERROR);
}
@@ -142,7 +142,7 @@ t_error _tok_word_dquote(t_token *tok, t_ast_node *out)
i = 0;
while (i < tok->subtokens.len)
{
if (_tok_word(&tok->subtokens.buffer[i], &tmp))
if (_tok_word(&tok->subtokens.buffer[i++], &tmp))
return (ast_free(ret), ERROR);
vec_ast_push(&ret->data.word.inner, tmp);
}
@@ -221,13 +221,13 @@ t_error handle_tok_redir(t_ast_node cmd, t_token *tok)
/// the names may be different idk
/// eventually the function will use t_error and all;
/// struct s_ast_command `ast/include/ast/_raw_structs.h`
t_ast_node ast_from_cmd(t_token tok)
t_error ast_from_cmd(t_token tok, t_vec_ast *output_queue)
{
t_ast_node ret;
t_usize i;
if (tok.type != TOK_CMD)
me_abort("tok.type != TOK_CMD");
return (ERROR);
ret = ast_alloc(AST_COMMAND);
i = 0;
while (i < tok.subtokens.len)
@@ -235,15 +235,17 @@ t_ast_node ast_from_cmd(t_token tok)
if (tok.subtokens.buffer[i].type == TOK_REDIR)
{
if (handle_tok_redir(ret, &tok.subtokens.buffer[i]))
me_abort("handle_tok_redir error");
return (ast_free(ret), ERROR);
}
else if (tok.subtokens.buffer[i].type == TOK_WORD)
{
if (handle_tok_word(ret, &tok.subtokens.buffer[i]))
me_abort("handle_tok_word error");
return (ast_free(ret), ERROR);
}
else
me_abort("handle_tok_cmd not word|redir");
return (ast_free(ret), ERROR);
i++;
}
return (ret);
token_free(tok);
return (vec_ast_push(output_queue, ret), NO_ERROR);
}
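Side note (not part of the commit): the dispatch ast_from_cmd performs is easy to picture with a small standalone Python sketch; the dict shapes below are invented for illustration and are not the project's t_token/t_ast_node types.

def ast_from_cmd_sketch(cmd_tok):
    # expects something like {"type": "CMD", "subtokens": [...]}
    if cmd_tok["type"] != "CMD":
        return None  # error, mirrors the new `return (ERROR)` guard
    node = {"kind": "COMMAND", "redirs": [], "words": []}
    for sub in cmd_tok["subtokens"]:
        if sub["type"] == "REDIR":
            node["redirs"].append(sub)   # handle_tok_redir in the real code
        elif sub["type"] == "WORD":
            node["words"].append(sub)    # handle_tok_word in the real code
        else:
            return None  # anything else inside a command is now an error instead of me_abort
    return node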

View file

@@ -6,7 +6,7 @@
/* By: maiboyer <maiboyer@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2024/10/09 12:44:53 by maiboyer #+# #+# */
/* Updated: 2024/10/10 16:22:39 by maiboyer ### ########.fr */
/* Updated: 2024/10/10 17:26:38 by maiboyer ### ########.fr */
/* */
/* ************************************************************************** */
@@ -16,6 +16,40 @@
#include "me/vec/vec_token.h"
#include "parser/token.h"
static enum e_ast_list_kind _ast_list_get_op(enum e_token ty)
{
if (ty == TOK_AND)
return (AST_LIST_AND);
else if (ty == TOK_OR)
return (AST_LIST_OR);
me_abort("invalid token type for ast_list operator");
return (-1);
}
static t_error _tok_pipeline(t_vec_ast *output_queue, t_ast_node rhs, t_ast_node lhs)
{
t_ast_node ret;
if (rhs->kind == AST_PIPELINE)
{
vec_ast_push_front(&rhs->data.pipeline.statements, lhs);
vec_ast_push(output_queue, rhs);
}
else if (lhs->kind == AST_PIPELINE)
{
vec_ast_push(&lhs->data.pipeline.statements, rhs);
vec_ast_push(output_queue, lhs);
}
else
{
ret = ast_alloc(AST_PIPELINE);
vec_ast_push(&ret->data.pipeline.statements, lhs);
vec_ast_push(&ret->data.pipeline.statements, rhs);
vec_ast_push(output_queue, ret);
}
return (NO_ERROR);
}
/// depending on op, which can be: TOK_AND TOK_PIPE TOK_OR
/// choose the right ast_node to build (t_ast_node->data.list + set operator, or t_ast_node->data.pipeline)
/// pop two elements from output_queue. for now the function must print an error if there are not enough elements
@@ -30,39 +64,28 @@
///
/// in the end we should change to using `t_error` and pushing the ast_node directly to output_queue in the function,
/// will change that later tho :)
t_ast_node ast_from_op(t_token tok, t_vec_ast *output_queue)
t_error ast_from_op(t_token tok, t_vec_ast *output_queue)
{
t_ast_node ret;
t_ast_node tmp;
t_ast_node lhs;
t_ast_node rhs;
// this needs to have a guard in case output_queue holds fewer than 2 elements
// otherwise it is good :)
// you could also make TOK_AND and TOK_OR share the same code to save some lines
ret = NULL;
if (tok.type == TOK_AND)
if (!(tok.type == TOK_AND || tok.type == TOK_OR || tok.type == TOK_PIPE))
return (ERROR);
if (output_queue == NULL || output_queue->len < 2)
return (ERROR);
vec_ast_pop(output_queue, &rhs);
vec_ast_pop(output_queue, &lhs);
if (tok.type == TOK_AND || tok.type == TOK_OR)
{
ret = ast_alloc(AST_LIST);
ret->data.list.op = AST_LIST_AND;
vec_ast_pop(output_queue, &ret->data.list.right);
vec_ast_pop(output_queue, &ret->data.list.left);
ret->data.list.op = _ast_list_get_op(tok.type);
ret->data.list.left = lhs;
ret->data.list.right = rhs;
vec_ast_push(output_queue, ret);
}
else if (tok.type == TOK_OR)
{
ret = ast_alloc(AST_LIST);
ret->data.list.op = AST_LIST_OR;
vec_ast_pop(output_queue, &ret->data.list.right);
vec_ast_pop(output_queue, &ret->data.list.left);
}
else if (tok.type == TOK_PIPE)
{
// Here there is an optimization that could be done in the future: if one node is already an AST_PIPELINE, just push the other node into the right place inside it and return that existing AST_PIPELINE node instead of allocating a new one!
ret = ast_alloc(AST_PIPELINE);
vec_ast_pop(output_queue, &tmp);
vec_ast_push(&ret->data.pipeline.statements, tmp);
vec_ast_pop(output_queue, &tmp);
vec_ast_push(&ret->data.pipeline.statements, tmp);
}
else
me_abort("ast_from_op not the good token type gived !\n");
return (ret);
else if (tok.type == TOK_PIPE && _tok_pipeline(output_queue, rhs, lhs))
return (ERROR);
token_free(tok);
return (NO_ERROR);
}
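A rough standalone illustration of the reduction that ast_from_op / _tok_pipeline now perform (plain Python lists and dicts, not the project's API): pop two operands, build a LIST node for && / ||, and splice into an existing PIPELINE node for | instead of always allocating a new one.

def reduce_op(op, output_queue):
    # op is "AND", "OR" or "PIPE"; output_queue is a plain list used as a stack of dict nodes
    if op not in ("AND", "OR", "PIPE") or len(output_queue) < 2:
        return False  # mirrors the new guards in ast_from_op
    rhs = output_queue.pop()
    lhs = output_queue.pop()
    if op in ("AND", "OR"):
        output_queue.append({"kind": "LIST", "op": op, "left": lhs, "right": rhs})
    elif rhs.get("kind") == "PIPELINE":   # rhs is already a pipeline: prepend lhs to it
        rhs["stmts"].insert(0, lhs)
        output_queue.append(rhs)
    elif lhs.get("kind") == "PIPELINE":   # lhs is already a pipeline: append rhs to it
        lhs["stmts"].append(rhs)
        output_queue.append(lhs)
    else:
        output_queue.append({"kind": "PIPELINE", "stmts": [lhs, rhs]})
    return True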

View file

@@ -6,7 +6,7 @@
/* By: rparodi <rparodi@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2024/10/07 18:04:13 by rparodi #+# #+# */
/* Updated: 2024/10/09 12:44:24 by maiboyer ### ########.fr */
/* Updated: 2024/10/10 17:28:21 by maiboyer ### ########.fr */
/* */
/* ************************************************************************** */
@@ -37,7 +37,7 @@ t_str token_name(t_token *token);
/// the names may be different idk
/// eventually the function will use t_error and all;
/// struct s_ast_command `ast/include/ast/_raw_structs.h`
t_ast_node ast_from_cmd(t_token tok);
t_error ast_from_cmd(t_token tok, t_vec_ast *output_queue);
/// depending on op, which can be: TOK_AND TOK_PIPE TOK_OR
/// choose the right ast_node to build (t_ast_node->data.list + set operator, or t_ast_node->data.pipeline)
@@ -49,7 +49,7 @@ t_ast_node ast_from_cmd(t_token tok);
/// struct s_ast_list if (tok.type == TOK_AND || tok.type == TOK_OR)
/// struct s_ast_pipeline if (tok.type == TOK_PIPE)
/// `ast/include/ast/_raw_structs.h`
t_ast_node ast_from_op(t_token tok, t_vec_ast *output_queue);
t_error ast_from_op(t_token tok, t_vec_ast *output_queue);
t_error yarn(t_vec_token ts, t_vec_ast *out)
{
@@ -63,7 +63,10 @@ t_error yarn(t_vec_token ts, t_vec_ast *out)
while (!vec_token_pop_front(&ts, &tok))
{
if (tok.type == TOK_CMD)
vec_ast_push(&output_queue, ast_from_cmd(tok));
{
if (ast_from_cmd(tok, &output_queue))
return (vec_token_free(stack), vec_ast_free(output_queue), token_free(tok), ERROR);
}
else if (tok.type == TOK_LPAREN)
vec_token_push(&stack, tok);
else if (tok.type == TOK_OR || tok.type == TOK_AND || tok.type == TOK_PIPE)
@@ -71,31 +74,38 @@ t_error yarn(t_vec_token ts, t_vec_ast *out)
while (vec_token_last(&stack) != NULL && vec_token_last(&stack)->type != TOK_LPAREN && _get_precedance(vec_token_last(&stack)) > _get_precedance(&tok))
{
vec_token_pop(&stack, &op);
vec_ast_push(&output_queue, ast_from_op(op, &output_queue));
if (ast_from_op(op, &output_queue))
return (vec_token_free(stack), vec_ast_free(output_queue), token_free(tok), token_free(op), ERROR);
}
vec_token_push(&stack, tok);
}
else if (tok.type == TOK_RPAREN)
{
token_free(tok);
// here this needs to change to push into an ast_node->data.subshell
// I'll take care of that tonight/later
while (vec_token_last(&stack) != NULL && vec_token_last(&stack)->type != TOK_LPAREN)
{
vec_token_pop(&stack, &op);
vec_ast_push(&output_queue, ast_from_op(op, &output_queue));
if (ast_from_op(op, &output_queue))
return (vec_token_free(stack), vec_ast_free(output_queue), token_free(op), ERROR);
}
if (!(vec_token_last(&stack) != NULL && vec_token_last(&stack)->type == TOK_LPAREN))
return (ERROR);
return (vec_token_free(stack), vec_ast_free(output_queue), ERROR);
vec_token_pop(&stack, &tok);
token_free(tok);
t_ast_node snode;
t_ast_node tmp;
snode = ast_alloc(AST_SUBSHELL);
vec_ast_pop(&output_queue, &tmp);
vec_ast_push(&snode->data.subshell.body, tmp);
vec_ast_push(&output_queue, snode);
}
}
while (!vec_token_pop(&stack, &op))
{
if (op.type == TOK_LPAREN)
return (token_free(tok), ERROR);
vec_ast_push(&output_queue, ast_from_op(op, &output_queue));
return (token_free(op), ERROR);
if (ast_from_op(op, &output_queue))
return (vec_token_free(stack), vec_ast_free(output_queue), token_free(op), ERROR);
}
vec_token_free(ts);
vec_token_free(stack);
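The surrounding yarn function is a shunting-yard pass: TOK_CMD tokens are converted and pushed straight onto output_queue, operators wait on a stack until precedence or a closing parenthesis forces a reduction, and parentheses now produce AST_SUBSHELL nodes. A self-contained toy version in Python, with tuple tokens, dict nodes, a simplified reduce step, and an assumed precedence where | binds tighter than && / || (none of this is the project's real API; _get_precedance is not shown in this diff):

PREC = {"PIPE": 2, "AND": 1, "OR": 1}  # assumed ordering, for illustration only

def reduce_operator(op, out):
    # simplified: always build a binary node (the real code flattens pipelines, see the earlier sketch)
    if len(out) < 2:
        return False
    rhs, lhs = out.pop(), out.pop()
    out.append({"kind": "PIPELINE" if op == "PIPE" else "LIST", "op": op, "left": lhs, "right": rhs})
    return True

def yarn_sketch(tokens):
    out, stack = [], []
    for tok in tokens:
        kind = tok[0]
        if kind == "CMD":
            out.append({"kind": "COMMAND", "name": tok[1]})
        elif kind == "LPAREN":
            stack.append(kind)
        elif kind in PREC:
            while stack and stack[-1] != "LPAREN" and PREC[stack[-1]] > PREC[kind]:
                if not reduce_operator(stack.pop(), out):
                    return None
            stack.append(kind)
        elif kind == "RPAREN":
            while stack and stack[-1] != "LPAREN":
                if not reduce_operator(stack.pop(), out):
                    return None
            if not stack or not out:
                return None                # unbalanced parenthesis / nothing to wrap
            stack.pop()                    # drop the matching LPAREN
            out.append({"kind": "SUBSHELL", "body": [out.pop()]})
    while stack:
        op = stack.pop()
        if op == "LPAREN" or not reduce_operator(op, out):
            return None
    return out

For example, yarn_sketch([("CMD", "a"), ("PIPE",), ("CMD", "b"), ("AND",), ("CMD", "c")]) ends with a single LIST node whose left side is the a | b pipeline.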

View file

@@ -1 +0,0 @@
__pycache__

View file

@@ -1,12 +0,0 @@
import collapse
import concat
import str_to_token
import ttoken
s = input("> ")
print(s)
tokens = str_to_token.str_to_token(s)
concated_tokens = concat.concat(tokens)
collapsed_tokens = collapse.collapse(concated_tokens)
ttoken.print_tokenlist(collapsed_tokens)

View file

@@ -1,38 +0,0 @@
from ttoken import *
TT = TokenType
# This function will transform some tokens into others depending on what follows them
def collapse(tokens: list[Token]):
i = 0
out = []
while i < len(tokens):
tok = tokens[i]
peek = tokens[i + 1] if i + 1 < len(tokens) else None
if peek is None:
out.append(tok)
i += 1
continue
if tok.ty == TT.PIPE and peek.ty == TT.PIPE:
out.append(Token(TT.OR, string="||"))
i += 2
elif tok.ty == TT.AMP and peek.ty == TT.AMP:
out.append(Token(TT.AND, string="&&"))
i += 2
elif tok.ty == TT.CARRET and tok.string == "<" and peek.ty == TT.CARRET and peek.string == "<":
out.append(Token(TT.DLCARRET, string="<<"))
i += 2
elif tok.ty == TT.CARRET and tok.string == ">" and peek.ty == TT.CARRET and peek.string == ">":
out.append(Token(TT.DRCARRET, string=">>"))
i += 2
elif tok.ty == TT.CARRET and tok.string == "<" :
out.append(Token(TT.LCARRET, string="<"))
i += 1
elif tok.ty == TT.CARRET and tok.string == ">" :
out.append(Token(TT.RCARRET, string=">"))
i += 1
else:
out.append(tok)
i += 1
return out
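For reference, a small usage sketch of the removed prototype (not in the repo): collapse only merges adjacent operator tokens, everything else passes through untouched.

toks = [Token(TT.PIPE, string="|"), Token(TT.PIPE, string="|"),
        Token(TT.CARRET, string="<"), Token(TT.CARRET, string="<")]
print([t.ty.name for t in collapse(toks)])  # -> ['OR', 'DLCARRET']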

View file

@@ -1,25 +0,0 @@
from ttoken import *
# This function will make a "big" token that will represent a word in the shell sense
def concat(tokens: list[Token]) -> list[Token]:
i = 0
out = []
while i < len(tokens):
tok = tokens[i]
# if the token is a token that can be inside a word, then we start creating a WORD "metaToken"
if tok.is_word():
word = Token(TokenType.WORD, subtokens=[])
word.subtokens.append(tok)
j = 1
# then we get every token after the first that is also a word and we push them
while i + j < len(tokens) and (tokens[i + j]).is_word():
word.subtokens.append(tokens[i + j])
j += 1
i += j
out.append(word)
else:
# otherwise we just push the token alone
out.append(tok)
i += 1
return out
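Likewise, a hypothetical usage sketch for concat (not in the repo): adjacent word-like tokens are wrapped into one WORD metatoken, operators stay on their own.

toks = [Token(TokenType.NQUOTE, string="ab"), Token(TokenType.DQUOTE, string="cd"),
        Token(TokenType.PIPE, string="|"), Token(TokenType.NQUOTE, string="ef")]
print([t.ty.name for t in concat(toks)])  # -> ['WORD', 'PIPE', 'WORD']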

View file

@@ -1,60 +0,0 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1726931411,
"narHash": "sha256-Oxfw+YhT/RDdOmzYbtrFSkU2SwdO7UfbjXWuU6Bo4+o=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "c0e65bb8293c21f3aa0fdc9fae8dcccec187c1cf",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View file

@@ -1,35 +0,0 @@
{
description = "Flake utils demo";
inputs.nixpkgs.url = "github:nixos/nixpkgs";
inputs.flake-utils.url = "github:numtide/flake-utils";
outputs = {
self,
nixpkgs,
flake-utils,
}:
flake-utils.lib.eachDefaultSystem (
system: let
pkgs = nixpkgs.legacyPackages.${system};
in {
devShell = pkgs.mkShell {
packages = with pkgs;
[
gnumake
llvmPackages_18.bintools
tokei
coreutils
python312
tree
]
++ (
if system == "x86_64-linux"
then [valgrind valgrind.dev]
else []
);
#ASAN_OPTIONS = "strict_string_checks=1:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1";
};
}
);
}

View file

@@ -1,107 +0,0 @@
from ttoken import *
TT = TokenType
def is_quote(c: str) -> bool:
return c == "'" or c == '"'
# This function takes the string and separates it into different tokens depending on the quotes
def str_to_token(s: str) -> list[Token]:
tokens = []
current_token = None
quote = 0
i = 0
while i < len(s):
c = s[i]
if quote == 0:
# if we have a quote, just push the current token if any, then switch to the correct quote token
if is_quote(c):
if current_token != None:
tokens.append(current_token)
quote = c
current_token = Token(TT.DQUOTE if c == '"' else TT.SQUOTE, string="")
else:
# here we have no quote, so we first create a token if none exists, then handle special stuff
# like whitespace for example, or any character we want to split into a single token of its own (; $ | &)
if current_token == None:
current_token = Token(TT.NQUOTE, string="")
if c.isspace():
# we have whitespace, so create a whitespace token, and push the current token
# if it isn't empty and isn't whitespace
if (
len(current_token.string) != 0
and current_token.ty != TT.WHITESPACE
):
tokens.append(current_token)
current_token = Token(TT.WHITESPACE, string="")
i += 1
continue
else:
# we DON'T have whitespace, so if the current token is a whitespace token, just push it and start a new raw-string token
if current_token.ty == TT.WHITESPACE:
tokens.append(current_token)
current_token = Token(TT.NQUOTE, string="")
if c == "$":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.DOLLAR, string="$"))
elif c == "(":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.LPAREN, string="("))
elif c == ")":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.RPAREN, string=")"))
elif c == "|":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.PIPE, string="|"))
elif c == "&":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.AMP, string="&"))
elif c == ";":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.SEMICOLON, string=";"))
elif c == ">" or c == "<":
tokens.append(current_token)
current_token = None
tokens.append(Token(TT.CARRET, string=c))
else:
current_token.append_char(c)
elif quote == "'":
# we are in a single quote; basically we push chars until we hit another single quote
if c == "'":
tokens.append(current_token)
current_token = None
quote = 0
else:
if current_token == None:
current_token = Token(TT.SQUOTE, string="")
current_token.append_char(c)
elif quote == '"':
# we are in a double quote; basically we push chars until we hit another double quote
if c == '"':
tokens.append(current_token)
current_token = None
quote = 0
else:
if current_token == None:
current_token = Token(TT.DQUOTE, string="")
current_token.append_char(c)
else:
print("you fucked up you quote thingy")
i += 1
# if the current token is not None and its type is "no quote" then we push it
if current_token != None and current_token.ty == TT.NQUOTE:
tokens.append(current_token)
# cleanup the empty tokens that may be here
out = []
for tok in tokens:
if not (tok.ty == TT.NQUOTE and len(tok.string) == 0):
out.append(tok)
return out
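Chaining the three deleted modules the way main.py did gives a quick end-to-end sketch (again just an illustration of the removed prototype):

import collapse
import concat
import str_to_token

toks = str_to_token.str_to_token("ls || wc")
toks = collapse.collapse(concat.concat(toks))
print([t.ty.name for t in toks])  # -> ['WORD', 'WHITESPACE', 'OR', 'WHITESPACE', 'WORD']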

View file

@@ -1,63 +0,0 @@
from enum import Enum
from dataclasses import dataclass
TokenType = Enum(
"TokenType",
[
"AMP", # ampersand == &
"AND", # and == &&
"CARRET", # any carret == < > << >>
"DLCARRET", # double left carret == <<
"DOLLAR", # dollar == $
"DQUOTE", # double quote string
"DRCARRET", # double right carret == >>
"EXPENSION", # an expension == $<no_quote_word>
"LCARRET", # left carret == <
"LPAREN", # left parenthesis == (
"NQUOTE", # no quote string
"OR", # or == ||
"PIPE", # pipe == |
"RCARRET", # right carret == >
"RPAREN", # right parenthesis == )
"SEMICOLON", # semicolor == ;
"SQUOTE", # single quote string
"WHITESPACE", # whitespace outside of quoted strings
"WORD", # a meta token, which contains subtokens
],
)
@dataclass
class Token:
ty: TokenType
string: str = None
subtokens: list = None
def is_metatoken(self) -> bool:
return self.subtokens != None
def append_char(self, c: str):
if self.string is None:
raise Exception(
f"Tried to push a char on a token that contains subtokens, TT={self.ty}"
)
self.string += c
def is_word(self):
return (
self.ty == TokenType.SQUOTE
or self.ty == TokenType.DQUOTE
or self.ty == TokenType.NQUOTE
or self.ty == TokenType.DOLLAR
)
def print_tokenlist(tokens: list[Token], *, depth=0):
for tok in tokens:
if tok.is_metatoken():
print_tokenlist(tok.subtokens, depth=depth + 1)
else:
print(f"{'\t' * depth}{tok.ty.name:>10} => \x1b[31;40m{tok.string}\x1b[0m")
__all__ = ["TokenType", "Token", "print_tokenlist"]