1
0
forked from GitHub/gf-core

This patch removes Gregoire's parse_tokens function from the Python binding and adds another implementation which builds on the existing API for lexers in the C runtime. It is now possible to write incremental lexers in Python.

This commit is contained in:
kr.angelov
2013-02-01 09:29:43 +00:00
parent c99ab058ea
commit 5e2474e346
8 changed files with 113 additions and 137 deletions

View File

@@ -223,37 +223,13 @@ pgf_parse(PgfConcr* concr, PgfCId cat, PgfLexer *lexer, GuPool* pool)
tok = pgf_lexer_read_token(lexer, lex_err);
}
if (gu_exn_caught(lex_err) != gu_type(GuEOF))
return NULL;
// Now begin enumerating the resulting syntax trees
return pgf_parse_result(state, pool);
}
// Parse a sentence supplied as an explicit token array instead of a
// PgfLexer (counterpart of pgf_parse above).
//
// concr  - the concrete grammar to parse with
// cat    - the start category identifier
// tokens - array of NUL-terminated token strings
// len    - number of entries in `tokens`
// pool   - pool used for all allocations, including the result
//
// Returns an enumeration of the resulting syntax trees, or NULL when the
// parser cannot be initialized or a token cannot extend the chart.
GuEnum*
pgf_parse_tokens(PgfConcr* concr, PgfCId cat, char **tokens, int len, GuPool* pool)
{
	// Set up the initial parse state for the requested start category.
	PgfParseState* state =
		pgf_parser_init_state(concr, cat, 0, pool);
	if (state == NULL) {
		return NULL;
	}

	// Feed each token to the parser in turn; a NULL successor state
	// means the sentence is not accepted up to this point.
	for (int idx = 0; idx < len; idx++) {
		PgfToken tok = gu_str_string(tokens[idx], pool);
		state = pgf_parser_next_state(state, tok, pool);
		if (state == NULL) {
			return NULL;
		}
	}

	// All tokens consumed: enumerate the syntax trees in the final chart.
	return pgf_parse_result(state, pool);
}
void
pgf_print_chunks(PgfConcr* concr, PgfCId cat, PgfLexer *lexer, GuPool* pool)
{