circuitpython/py/lexer.h

/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#ifndef MICROPY_INCLUDED_PY_LEXER_H
#define MICROPY_INCLUDED_PY_LEXER_H

#include <stdint.h>

#include "py/mpconfig.h"
#include "py/qstr.h"
#include "py/reader.h"

/* lexer.h -- simple tokeniser for MicroPython
*
* Uses (byte) length instead of null termination.
* Tokens are the same - UTF-8 with (byte) length.
*/
typedef enum _mp_token_kind_t {
    MP_TOKEN_END,

    MP_TOKEN_INVALID,
    MP_TOKEN_DEDENT_MISMATCH,
    MP_TOKEN_LONELY_STRING_OPEN,
    #if MICROPY_PY_FSTRINGS
    MP_TOKEN_MALFORMED_FSTRING,
    MP_TOKEN_FSTRING_RAW,
    #endif

    MP_TOKEN_NEWLINE,
    MP_TOKEN_INDENT,
    MP_TOKEN_DEDENT,

    MP_TOKEN_NAME,
    MP_TOKEN_INTEGER,
    MP_TOKEN_FLOAT_OR_IMAG,
    MP_TOKEN_STRING,
    MP_TOKEN_BYTES,
    MP_TOKEN_ELLIPSIS,

    MP_TOKEN_KW_FALSE,
    MP_TOKEN_KW_NONE,
    MP_TOKEN_KW_TRUE,
    MP_TOKEN_KW___DEBUG__,
    MP_TOKEN_KW_AND,
    MP_TOKEN_KW_AS,
    MP_TOKEN_KW_ASSERT,
    #if MICROPY_PY_ASYNC_AWAIT
    MP_TOKEN_KW_ASYNC,
    MP_TOKEN_KW_AWAIT,
    #endif
    MP_TOKEN_KW_BREAK,
    MP_TOKEN_KW_CLASS,
    MP_TOKEN_KW_CONTINUE,
    MP_TOKEN_KW_DEF,
    MP_TOKEN_KW_DEL,
    MP_TOKEN_KW_ELIF,
    MP_TOKEN_KW_ELSE,
    MP_TOKEN_KW_EXCEPT,
    MP_TOKEN_KW_FINALLY,
    MP_TOKEN_KW_FOR,
    MP_TOKEN_KW_FROM,
    MP_TOKEN_KW_GLOBAL,
    MP_TOKEN_KW_IF,
    MP_TOKEN_KW_IMPORT,
    MP_TOKEN_KW_IN,
    MP_TOKEN_KW_IS,
    MP_TOKEN_KW_LAMBDA,
    MP_TOKEN_KW_NONLOCAL,
    MP_TOKEN_KW_NOT,
    MP_TOKEN_KW_OR,
    MP_TOKEN_KW_PASS,
    MP_TOKEN_KW_RAISE,
    MP_TOKEN_KW_RETURN,
    MP_TOKEN_KW_TRY,
    MP_TOKEN_KW_WHILE,
    MP_TOKEN_KW_WITH,
    MP_TOKEN_KW_YIELD,

    MP_TOKEN_OP_ASSIGN,
    MP_TOKEN_OP_TILDE,

    // Order of these 6 matches corresponding mp_binary_op_t operator
    MP_TOKEN_OP_LESS,
    MP_TOKEN_OP_MORE,
    MP_TOKEN_OP_DBL_EQUAL,
    MP_TOKEN_OP_LESS_EQUAL,
    MP_TOKEN_OP_MORE_EQUAL,
    MP_TOKEN_OP_NOT_EQUAL,

    // Order of these 13 matches corresponding mp_binary_op_t operator
    MP_TOKEN_OP_PIPE,
    MP_TOKEN_OP_CARET,
    MP_TOKEN_OP_AMPERSAND,
    MP_TOKEN_OP_DBL_LESS,
    MP_TOKEN_OP_DBL_MORE,
    MP_TOKEN_OP_PLUS,
    MP_TOKEN_OP_MINUS,
    MP_TOKEN_OP_STAR,
    MP_TOKEN_OP_AT,
    MP_TOKEN_OP_DBL_SLASH,
    MP_TOKEN_OP_SLASH,
    MP_TOKEN_OP_PERCENT,
    MP_TOKEN_OP_DBL_STAR,

    // Order of these 13 matches corresponding mp_binary_op_t operator
    MP_TOKEN_DEL_PIPE_EQUAL,
    MP_TOKEN_DEL_CARET_EQUAL,
    MP_TOKEN_DEL_AMPERSAND_EQUAL,
    MP_TOKEN_DEL_DBL_LESS_EQUAL,
    MP_TOKEN_DEL_DBL_MORE_EQUAL,
    MP_TOKEN_DEL_PLUS_EQUAL,
    MP_TOKEN_DEL_MINUS_EQUAL,
    MP_TOKEN_DEL_STAR_EQUAL,
    MP_TOKEN_DEL_AT_EQUAL,
    MP_TOKEN_DEL_DBL_SLASH_EQUAL,
    MP_TOKEN_DEL_SLASH_EQUAL,
    MP_TOKEN_DEL_PERCENT_EQUAL,
    MP_TOKEN_DEL_DBL_STAR_EQUAL,

    MP_TOKEN_DEL_PAREN_OPEN,
    MP_TOKEN_DEL_PAREN_CLOSE,
    MP_TOKEN_DEL_BRACKET_OPEN,
    MP_TOKEN_DEL_BRACKET_CLOSE,
    MP_TOKEN_DEL_BRACE_OPEN,
    MP_TOKEN_DEL_BRACE_CLOSE,
    MP_TOKEN_DEL_COMMA,
    MP_TOKEN_DEL_COLON,
    MP_TOKEN_DEL_PERIOD,
    MP_TOKEN_DEL_SEMICOLON,
    MP_TOKEN_DEL_EQUAL,
    MP_TOKEN_DEL_MINUS_MORE,
} mp_token_kind_t;
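
// Because the groups marked "Order of these N matches ..." above mirror the
// corresponding runs of mp_binary_op_t values, a token kind can be mapped to
// its binary operator with simple arithmetic instead of a lookup table. A
// hedged sketch (illustrative only, not necessarily the exact expression the
// compiler uses):
//
//     mp_binary_op_t op = MP_BINARY_OP_LESS + (tok_kind - MP_TOKEN_OP_LESS);
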
// this data structure is exposed for efficiency
// public members are: source_name, tok_line, tok_column, tok_kind, vstr
typedef struct _mp_lexer_t {
    qstr source_name; // name of source
    mp_reader_t reader; // stream source
    unichar chr0, chr1, chr2; // current cached characters from source
    #if MICROPY_PY_FSTRINGS
    unichar chr0_saved, chr1_saved, chr2_saved; // current cached characters from alt source
    #endif
    size_t line; // current source line
    size_t column; // current source column
    mp_int_t emit_dent; // non-zero when there are INDENT/DEDENT tokens to emit
    mp_int_t nested_bracket_level; // >0 when there are nested brackets over multiple lines
    size_t alloc_indent_level;
    size_t num_indent_level;
    uint16_t *indent_level;
    size_t tok_line; // token source line
    size_t tok_column; // token source column
    mp_token_kind_t tok_kind; // token kind
    vstr_t vstr; // token data
    #if MICROPY_PY_FSTRINGS
    vstr_t fstring_args; // extracted arguments to pass to .format()
    size_t fstring_args_idx; // how many bytes of fstring_args have been read
    #endif
} mp_lexer_t;
mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader);
mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len);
void mp_lexer_free(mp_lexer_t *lex);
void mp_lexer_to_next(mp_lexer_t *lex);
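
// A minimal usage sketch (an illustration, not part of this header): create a
// lexer over an in-memory source buffer and walk the token stream. The names
// `src` and `len` are placeholders for caller-provided data; real callers run
// this under an nlr handler because lexer creation can raise on allocation
// failure.
//
//     mp_lexer_t *lex = mp_lexer_new_from_str_len(MP_QSTR__lt_stdin_gt_, src, len, 0);
//     while (lex->tok_kind != MP_TOKEN_END) {
//         // tok_kind, tok_line, tok_column and vstr are the public members;
//         // vstr holds the token text as length + bytes (no NUL terminator).
//         mp_lexer_to_next(lex);
//     }
//     mp_lexer_free(lex);
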
/******************************************************************/
// platform specific import function; must be implemented for a specific port
// TODO tidy up, rename, or put elsewhere
typedef enum {
    MP_IMPORT_STAT_NO_EXIST,
    MP_IMPORT_STAT_DIR,
    MP_IMPORT_STAT_FILE,
} mp_import_stat_t;
mp_import_stat_t mp_import_stat(const char *path);
mp_lexer_t *mp_lexer_new_from_file(const char *filename);
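
// A sketch of what a port-level mp_import_stat() implementation might look
// like, assuming a hypothetical POSIX-flavoured port with <sys/stat.h>
// available; this is illustrative only, not the code of any particular port.
//
//     mp_import_stat_t mp_import_stat(const char *path) {
//         struct stat st;
//         if (stat(path, &st) != 0) {
//             return MP_IMPORT_STAT_NO_EXIST;
//         }
//         return S_ISDIR(st.st_mode) ? MP_IMPORT_STAT_DIR : MP_IMPORT_STAT_FILE;
//     }
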
#if MICROPY_HELPER_LEXER_UNIX
mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd);
#endif
#endif // MICROPY_INCLUDED_PY_LEXER_H