Updated Grammar

This commit is contained in:
parent 74d642f297
commit 175efe0f2f

1246 changed files with 109558 additions and 114993 deletions

@@ -15,94 +15,70 @@

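/*
** State 485: sym_word is accepted here; the map helper, '#', '\\',
** 0x0b..'\r', the digits 1-9, ASCII letters and sym_word_character_set_1
** each select the next lexer state.
*/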
bool lex_normal_s485(t_lexer *lexer, t_lexer_state *s)
{
	lex_accept_token(sym_word, lexer, s);
	if (lex_normal_map485(lexer, s))
		return (true);
	if (s->lookahead == '#')
		return (lex_advance(498, lexer, s));
	if (s->lookahead == '\\')
		return (lex_advance(163, lexer, s));
	if ((s->lookahead >= 0x0b && s->lookahead <= '\r'))
		return (lex_advance(485, lexer, s));
	if ((s->lookahead >= '1' && s->lookahead <= '9'))
		return (lex_advance(408, lexer, s));
	if (((s->lookahead >= 'A' && s->lookahead <= 'Z') || \
		(s->lookahead >= 'a' && s->lookahead <= 'z')))
		return (lex_advance(440, lexer, s));
	if ((!s->eof && set_contains(sym_word_character_set_1(), \
		9, s->lookahead)))
		return (lex_advance(528, lexer, s));
	return (lex_advance(497, lexer, s));
	return (lex_end_state(lexer, s));
}

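/*
** State 486: sym_word is accepted here; '\n', '#', '\\', 0x0b..'\r',
** digits and sym_word_character_set_1 each select the next lexer state.
*/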
bool lex_normal_s486(t_lexer *lexer, t_lexer_state *s)
{
	lex_accept_token(sym_word, lexer, s);
	if (s->lookahead == '\n')
		return (lex_advance(319, lexer, s));
	if (s->lookahead == '#')
		return (lex_advance(529, lexer, s));
	if (s->lookahead == '\\')
		return (lex_advance(150, lexer, s));
	if ((s->lookahead >= 0x0b && s->lookahead <= '\r'))
		return (lex_advance(486, lexer, s));
	if ((s->lookahead >= '0' && s->lookahead <= '9'))
		return (lex_advance(407, lexer, s));
	if ((!s->eof && set_contains(sym_word_character_set_1(), \
		9, s->lookahead)))
		return (lex_advance(528, lexer, s));
	return (lex_advance(218, lexer, s));
	if (s->lookahead == 'a')
		return (lex_advance(487, lexer, s));
	if ((!s->eof && \
		set_contains(sym__comment_word_character_set_1(), 10, \
		s->lookahead)))
		return (lex_advance(497, lexer, s));
	return (lex_end_state(lexer, s));
}

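/*
** State 487: sym_word is accepted here; '\n', '#', '\\', 'e',
** 0x0b..'\r', digits and sym_word_character_set_1 each select the
** next lexer state.
*/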
bool lex_normal_s487(t_lexer *lexer, t_lexer_state *s)
{
	lex_accept_token(sym_word, lexer, s);
	if (s->lookahead == '\n')
		return (lex_advance(320, lexer, s));
	if (s->lookahead == '#')
		return (lex_advance(529, lexer, s));
	if (s->lookahead == '\\')
		return (lex_advance(156, lexer, s));
	if (s->lookahead == 'e')
		return (lex_advance(527, lexer, s));
	if ((s->lookahead >= 0x0b && s->lookahead <= '\r'))
		return (lex_advance(487, lexer, s));
	if ((s->lookahead >= '0' && s->lookahead <= '9'))
		return (lex_advance(407, lexer, s));
	if ((!s->eof && set_contains(sym_word_character_set_1(), \
		9, s->lookahead)))
		return (lex_advance(528, lexer, s));
	return (lex_advance(218, lexer, s));
	if (s->lookahead == 'c')
		return (lex_advance(248, lexer, s));
	if ((!s->eof && \
		set_contains(sym__comment_word_character_set_1(), 10, \
		s->lookahead)))
		return (lex_advance(497, lexer, s));
	return (lex_end_state(lexer, s));
}

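/*
** State 488: sym_word is accepted here; the map helper, 0x0b..'\r',
** the digits 1-9, ASCII letters, sym_word_character_set_1, '\\' and 'e'
** each select the next lexer state.
*/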
bool lex_normal_s488(t_lexer *lexer, t_lexer_state *s)
{
	lex_accept_token(sym_word, lexer, s);
	if (lex_normal_map488(lexer, s))
		return (true);
	if ((s->lookahead >= 0x0b && s->lookahead <= '\r'))
		return (lex_advance(488, lexer, s));
	if ((s->lookahead >= '1' && s->lookahead <= '9'))
		return (lex_advance(408, lexer, s));
	if (((s->lookahead >= 'A' && s->lookahead <= 'Z') || \
		(s->lookahead >= 'a' && s->lookahead <= 'z')))
		return (lex_advance(440, lexer, s));
	if ((!s->eof && set_contains(sym_word_character_set_1(), \
		9, s->lookahead)))
		return (lex_advance(528, lexer, s));
	if (s->lookahead == '\\')
		return (lex_advance(218, lexer, s));
	if (s->lookahead == 'e')
		return (lex_advance(246, lexer, s));
	if ((!s->eof && \
		set_contains(sym__comment_word_character_set_1(), 10, \
		s->lookahead)))
		return (lex_advance(497, lexer, s));
	return (lex_end_state(lexer, s));
}

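/*
** State 489: sym_word is accepted here; '\n', '#', '\\', 0x0b..'\r',
** digits and sym_word_character_set_1 each select the next lexer state.
*/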
bool lex_normal_s489(t_lexer *lexer, t_lexer_state *s)
{
	lex_accept_token(sym_word, lexer, s);
	if (s->lookahead == '\n')
		return (lex_advance(323, lexer, s));
	if (s->lookahead == '#')
		return (lex_advance(529, lexer, s));
	if (s->lookahead == '\\')
		return (lex_advance(152, lexer, s));
	if ((s->lookahead >= 0x0b && s->lookahead <= '\r'))
		return (lex_advance(489, lexer, s));
	if ((s->lookahead >= '0' && s->lookahead <= '9'))
		return (lex_advance(407, lexer, s));
	if ((!s->eof && set_contains(sym_word_character_set_1(), \
		9, s->lookahead)))
		return (lex_advance(528, lexer, s));
	return (lex_advance(218, lexer, s));
	if (s->lookahead == 'f')
		return (lex_advance(244, lexer, s));
	if ((!s->eof && \
		set_contains(sym__comment_word_character_set_1(), 10, \
		s->lookahead)))
		return (lex_advance(497, lexer, s));
	return (lex_end_state(lexer, s));
}