mirror of https://github.com/python/cpython.git synced 2024-11-22 05:26:10 +01:00
cpython/Parser/tokenizer/tokenizer.h
Lysandros Nikolaou 01481f2dc1
gh-104169: Refactor tokenizer into lexer and wrappers (#110684)
* The lexer, which includes the actual lexeme-producing logic, goes into
  the `lexer` directory.
* The wrappers, one wrapper per input mode (file, string, utf-8, and
  readline), go into the `tokenizer` directory and include logic for
  creating a lexer instance and managing the buffer for different modes
  (a hypothetical wrapper is sketched below).
---------

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Co-authored-by: blurb-it[bot] <43283697+blurb-it[bot]@users.noreply.github.com>
2023-10-11 15:14:44 +00:00
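
To make the split concrete, here is a rough sketch of the shape of a per-mode
wrapper under this design: the wrapper handles mode-specific input and buffer
setup, then hands the prepared buffer to a lexer instance. Every identifier in
this sketch except struct tok_state (tokenizer_from_string, lexer_new_state,
lexer_set_buffer, lexer_free_state) is hypothetical, invented purely for
illustration; the real wrappers and lexer live in Parser/tokenizer/ and
Parser/lexer/ respectively.

/* Illustrative only -- all helpers below are hypothetical names,
 * not the actual CPython internals. */
#include <string.h>

struct tok_state;                                 /* defined by the lexer */
extern struct tok_state *lexer_new_state(void);   /* hypothetical */
extern int lexer_set_buffer(struct tok_state *, const char *start,
                            const char *end);     /* hypothetical */
extern void lexer_free_state(struct tok_state *); /* hypothetical */

/* A string-mode wrapper: the whole buffer is available up front, so
 * buffer management is just "point the lexer at it".  A file or
 * readline wrapper would instead arrange for incremental refills. */
static struct tok_state *
tokenizer_from_string(const char *str)
{
    struct tok_state *tok = lexer_new_state();    /* 1. create a lexer */
    if (tok == NULL) {
        return NULL;
    }
    /* 2. mode-specific buffer setup */
    if (lexer_set_buffer(tok, str, str + strlen(str)) < 0) {
        lexer_free_state(tok);
        return NULL;
    }
    return tok;
}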

#ifndef Py_TOKENIZER_H
#define Py_TOKENIZER_H

#include "Python.h"

/* One constructor per input mode; each returns a lexer instance
   (struct tok_state) wired up to that mode's buffer management. */
struct tok_state *_PyTokenizer_FromString(const char *, int, int);
struct tok_state *_PyTokenizer_FromUTF8(const char *, int, int);
struct tok_state *_PyTokenizer_FromReadline(PyObject*, const char*, int, int);
struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
                                        const char *, const char *);

#define tok_dump _Py_tok_dump

#endif /* !Py_TOKENIZER_H */
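
For context, a minimal sketch of how these entry points are driven, modeled
loosely on CPython's PEG parser (Parser/pegen.c). It assumes the internal
helpers _PyTokenizer_Get, _PyTokenizer_Free, and _PyToken_Init/_PyToken_Free,
plus the token-type constants (ENDMARKER, ERRORTOKEN), all of which are
declared in other Parser headers, and it reads the two int arguments of
_PyTokenizer_FromString as the exec_input and preserve_crlf flags.

/* Sketch: lex a string to ENDMARKER using the entry points above.
 * Assumes internal helpers and token constants from other Parser/
 * headers, as noted in the text. */
#include "Python.h"
#include "tokenizer.h"

static int
lex_string(const char *source)
{
    /* exec_input=1 ensures a trailing NEWLINE as for exec();
     * preserve_crlf=0 lets the lexer normalize line endings
     * (assumed meaning of the two int flags). */
    struct tok_state *tok = _PyTokenizer_FromString(source, 1, 0);
    if (tok == NULL) {
        return -1;  /* error already set */
    }
    int type;
    do {
        struct token t;
        _PyToken_Init(&t);
        type = _PyTokenizer_Get(tok, &t);
        /* t.start..t.end delimit the lexeme in the input buffer;
         * t.lineno/t.col_offset give its source position. */
        _PyToken_Free(&t);
    } while (type != ENDMARKER && type != ERRORTOKEN);
    _PyTokenizer_Free(tok);
    return type == ERRORTOKEN ? -1 : 0;
}

The same loop would work unchanged with the other three constructors, since
each wrapper returns the same struct tok_state; only the way the input buffer
is acquired and refilled differs between modes.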