/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "py/reader.h"
#include "py/lexer.h"
#include "py/runtime.h"

#include "supervisor/shared/translate.h"

#if MICROPY_ENABLE_COMPILER

#define TAB_SIZE (8)
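
// Worked example of the tab rule: with TAB_SIZE of 8, a tab advances the
// column to the next multiple-of-8 stop plus one (columns 1, 9, 17, ...).
// The formula in next_char() maps column 3 to ((3 - 1 + 8) / 8) * 8 + 1 = 9
// and column 9 to ((9 - 1 + 8) / 8) * 8 + 1 = 17.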

// TODO seems that CPython allows NULL byte in the input stream
// don't know if that's intentional or not, but we don't allow it

#define MP_LEXER_EOF ((unichar)MP_READER_EOF)
#define CUR_CHAR(lex) ((lex)->chr0)
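
// The lexer maintains a 3-character lookahead window: chr0 is the current
// character, chr1 and chr2 the next two. For the input "a<=b" the window
// starts as chr0='a', chr1='<', chr2='=', and each call to next_char() shifts
// it left by one, pulling a fresh byte from the reader into chr2.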

STATIC bool is_end(mp_lexer_t *lex) {
    return lex->chr0 == MP_LEXER_EOF;
}

STATIC bool is_physical_newline(mp_lexer_t *lex) {
    return lex->chr0 == '\n';
}

STATIC bool is_char(mp_lexer_t *lex, byte c) {
    return lex->chr0 == c;
}

STATIC bool is_char_or(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr0 == c1 || lex->chr0 == c2;
}

STATIC bool is_char_or3(mp_lexer_t *lex, byte c1, byte c2, byte c3) {
    return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3;
}
|
|
|
|
2020-03-09 10:02:47 -04:00
|
|
|
#if MICROPY_COMP_FSTRING_LITERAL
|
py: Implement partial PEP-498 (f-string) support
This implements (most of) the PEP-498 spec for f-strings, with two
exceptions:
- raw f-strings (`fr` or `rf` prefixes) raise `NotImplementedError`
- one special corner case does not function as specified in the PEP
(more on that in a moment)
This is implemented in the core as a syntax translation, brute-forcing
all f-strings to run through `String.format`. For example, the statement
`x='world'; print(f'hello {x}')` gets translated *at a syntax level*
(injected into the lexer) to `x='world'; print('hello {}'.format(x))`.
While this may lead to weird column results in tracebacks, it seemed
like the fastest, most efficient, and *likely* most RAM-friendly option,
despite being implemented under the hood with a completely separate
`vstr_t`.
Since [string concatenation of adjacent literals is implemented in the
lexer](https://github.com/micropython/micropython/commit/534b7c368dc2af7720f3aaed0c936ef46d773957),
two side effects emerge:
- All strings with at least one f-string portion are concatenated into a
single literal which *must* be run through `String.format()` wholesale,
and:
- Concatenation of a raw string with interpolation characters with an
f-string will cause `IndexError`/`KeyError`, which is both different
from CPython *and* different from the corner case mentioned in the PEP
(which gave an example of the following:)
```python
x = 10
y = 'hi'
assert ('a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e') == 'ab10{c}str< hi >de'
```
The above-linked commit detailed a pretty solid case for leaving string
concatenation in the lexer rather than putting it in the parser, and
undoing that decision would likely be disproportionately costly on
resources for the sake of a probably-low-impact corner case. An
alternative to become complaint with this corner case of the PEP would
be to revert to string concatenation in the parser *only when an
f-string is part of concatenation*, though I've done no investigation on
the difficulty or costs of doing this.
A decent set of tests is included. I've manually tested this on the
`unix` port on Linux and on a Feather M4 Express (`atmel-samd`) and
things seem sane.
2019-08-11 00:27:20 -04:00
|
|
|
STATIC bool is_char_or4(mp_lexer_t *lex, byte c1, byte c2, byte c3, byte c4) {
|
|
|
|
return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3 || lex->chr0 == c4;
|
|
|
|
}
|
2020-03-09 10:02:47 -04:00
|
|
|
#endif
|
py: Implement partial PEP-498 (f-string) support
This implements (most of) the PEP-498 spec for f-strings, with two
exceptions:
- raw f-strings (`fr` or `rf` prefixes) raise `NotImplementedError`
- one special corner case does not function as specified in the PEP
(more on that in a moment)
This is implemented in the core as a syntax translation, brute-forcing
all f-strings to run through `String.format`. For example, the statement
`x='world'; print(f'hello {x}')` gets translated *at a syntax level*
(injected into the lexer) to `x='world'; print('hello {}'.format(x))`.
While this may lead to weird column results in tracebacks, it seemed
like the fastest, most efficient, and *likely* most RAM-friendly option,
despite being implemented under the hood with a completely separate
`vstr_t`.
Since [string concatenation of adjacent literals is implemented in the
lexer](https://github.com/micropython/micropython/commit/534b7c368dc2af7720f3aaed0c936ef46d773957),
two side effects emerge:
- All strings with at least one f-string portion are concatenated into a
single literal which *must* be run through `String.format()` wholesale,
and:
- Concatenation of a raw string with interpolation characters with an
f-string will cause `IndexError`/`KeyError`, which is both different
from CPython *and* different from the corner case mentioned in the PEP
(which gave an example of the following:)
```python
x = 10
y = 'hi'
assert ('a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e') == 'ab10{c}str< hi >de'
```
The above-linked commit detailed a pretty solid case for leaving string
concatenation in the lexer rather than putting it in the parser, and
undoing that decision would likely be disproportionately costly on
resources for the sake of a probably-low-impact corner case. An
alternative to become complaint with this corner case of the PEP would
be to revert to string concatenation in the parser *only when an
f-string is part of concatenation*, though I've done no investigation on
the difficulty or costs of doing this.
A decent set of tests is included. I've manually tested this on the
`unix` port on Linux and on a Feather M4 Express (`atmel-samd`) and
things seem sane.
2019-08-11 00:27:20 -04:00
|
|
|

STATIC bool is_char_following(mp_lexer_t *lex, byte c) {
    return lex->chr1 == c;
}

STATIC bool is_char_following_or(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr1 == c1 || lex->chr1 == c2;
}

STATIC bool is_char_following_following_or(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr2 == c1 || lex->chr2 == c2;
}

STATIC bool is_char_and(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr0 == c1 && lex->chr1 == c2;
}

STATIC bool is_whitespace(mp_lexer_t *lex) {
    return unichar_isspace(lex->chr0);
}

STATIC bool is_letter(mp_lexer_t *lex) {
    return unichar_isalpha(lex->chr0);
}

STATIC bool is_digit(mp_lexer_t *lex) {
    return unichar_isdigit(lex->chr0);
}

STATIC bool is_following_digit(mp_lexer_t *lex) {
    return unichar_isdigit(lex->chr1);
}

STATIC bool is_following_base_char(mp_lexer_t *lex) {
    const unichar chr1 = lex->chr1 | 0x20;
    return chr1 == 'b' || chr1 == 'o' || chr1 == 'x';
}
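
// OR-ing with 0x20 sets the ASCII lowercase bit ('B' | 0x20 == 'b'), so one
// comparison accepts both cases of the base prefixes 0b/0B, 0o/0O and 0x/0X.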

STATIC bool is_following_odigit(mp_lexer_t *lex) {
    return lex->chr1 >= '0' && lex->chr1 <= '7';
}

STATIC bool is_string_or_bytes(mp_lexer_t *lex) {
    return is_char_or(lex, '\'', '\"')
           #if MICROPY_COMP_FSTRING_LITERAL
           || (is_char_or4(lex, 'r', 'u', 'b', 'f') && is_char_following_or(lex, '\'', '\"'))
           || ((is_char_and(lex, 'r', 'f') || is_char_and(lex, 'f', 'r'))
               && is_char_following_following_or(lex, '\'', '\"'))
           #else
           || (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
           #endif
           || ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r'))
               && is_char_following_following_or(lex, '\'', '\"'));
}
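
// Lookahead examples: a bare quote matches immediately; one-letter prefixes
// such as r'...', u'...', b'...' (and f'...' with f-strings enabled) match
// via the quote in chr1; two-letter prefixes such as rb'...' or br'...'
// (and rf''/fr'') need the opening quote in chr2.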

// to easily parse utf-8 identifiers we allow any raw byte with high bit set
STATIC bool is_head_of_identifier(mp_lexer_t *lex) {
    return is_letter(lex) || lex->chr0 == '_' || lex->chr0 >= 0x80;
}

STATIC bool is_tail_of_identifier(mp_lexer_t *lex) {
    return is_head_of_identifier(lex) || is_digit(lex);
}
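
// For example, an identifier like "é" arrives as the UTF-8 bytes 0xC3 0xA9;
// both bytes have the high bit set, so they pass is_head_of_identifier()
// without the lexer having to decode the code point.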

#if MICROPY_COMP_FSTRING_LITERAL
STATIC void swap_char_banks(mp_lexer_t *lex) {
    if (lex->vstr_postfix_processing) {
        lex->chr3 = lex->chr0;
        lex->chr4 = lex->chr1;
        lex->chr5 = lex->chr2;
        lex->chr0 = lex->vstr_postfix.buf[0];
        lex->chr1 = lex->vstr_postfix.buf[1];
        lex->chr2 = lex->vstr_postfix.buf[2];

        lex->vstr_postfix_idx = 3;
    } else {
        // blindly reset to the "backup" bank when done postfix processing
        // this restores control to the mp_reader
        lex->chr0 = lex->chr3;
        lex->chr1 = lex->chr4;
        lex->chr2 = lex->chr5;
        // chr3-chr5 are deliberately not reset here - WARNING: treat them as garbage from now on

        vstr_reset(&lex->vstr_postfix);
        lex->vstr_postfix_idx = 0;
    }
}
#endif
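
// Sketch of the f-string translation this machinery supports: an f-string is
// rewritten at the lexer level into a .format() call, e.g.
//     x = 'world'; print(f'hello {x}')
// is lexed as if it were
//     x = 'world'; print('hello {}'.format(x))
// The literal text accumulates in lex->vstr while the generated ".format(x,"
// postfix accumulates in lex->vstr_postfix; when the literal ends, the char
// banks are swapped so the postfix is lexed as ordinary input, then swapped
// back to hand control to the mp_reader.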

STATIC void next_char(mp_lexer_t *lex) {
    if (lex->chr0 == '\n') {
        // a new line
        ++lex->line;
        lex->column = 1;
    } else if (lex->chr0 == '\t') {
        // a tab
        lex->column = (((lex->column - 1 + TAB_SIZE) / TAB_SIZE) * TAB_SIZE) + 1;
    } else {
        // a character worth one column
        ++lex->column;
    }

    lex->chr0 = lex->chr1;
    lex->chr1 = lex->chr2;

    #if MICROPY_COMP_FSTRING_LITERAL
    if (lex->vstr_postfix_processing) {
        if (lex->vstr_postfix_idx == lex->vstr_postfix.len) {
            lex->chr2 = '\0';
        } else {
            lex->chr2 = lex->vstr_postfix.buf[lex->vstr_postfix_idx++];
        }
    } else
    #endif
    {
        lex->chr2 = lex->reader.readbyte(lex->reader.data);
    }

    if (lex->chr1 == '\r') {
        // CR is a new line, converted to LF
        lex->chr1 = '\n';
        if (lex->chr2 == '\n') {
            // CR LF is a single new line, throw out the extra LF
            lex->chr2 = lex->reader.readbyte(lex->reader.data);
        }
    }

    // check if we need to insert a newline at end of file
    if (lex->chr2 == MP_LEXER_EOF && lex->chr1 != MP_LEXER_EOF && lex->chr1 != '\n') {
        lex->chr2 = '\n';
    }

    #if MICROPY_COMP_FSTRING_LITERAL
    if (lex->vstr_postfix_processing && lex->chr0 == '\0') {
        lex->vstr_postfix_processing = false;
        swap_char_banks(lex);
    }
    #endif
}
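
// Newline normalization examples: "a\r\nb" and "a\rb" both reach the token
// stream as "a\nb", and input that lacks a trailing newline gets one
// inserted, so "x = 1" lexes the same as "x = 1\n".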

STATIC void indent_push(mp_lexer_t *lex, size_t indent) {
    if (lex->num_indent_level >= lex->alloc_indent_level) {
        lex->indent_level = m_renew(uint16_t, lex->indent_level, lex->alloc_indent_level, lex->alloc_indent_level + MICROPY_ALLOC_LEXEL_INDENT_INC);
        lex->alloc_indent_level += MICROPY_ALLOC_LEXEL_INDENT_INC;
    }
    lex->indent_level[lex->num_indent_level++] = indent;
}

STATIC size_t indent_top(mp_lexer_t *lex) {
    return lex->indent_level[lex->num_indent_level - 1];
}

STATIC void indent_pop(mp_lexer_t *lex) {
    lex->num_indent_level -= 1;
}
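
// Sketch of how this stack is used (the tokenizing code sits outside this
// excerpt): each deeper suite pushes its indent value and the lexer emits an
// INDENT token; when a line returns to a shallower indent, levels are popped,
// one DEDENT token per pop, until indent_top() matches the current indent.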

// some tricky operator encoding:
//     <op>  = begin with <op>, if this opchar matches then begin here
//     e<op> = end with <op>, if this opchar matches then end
//     c<op> = continue with <op>, if this opchar matches then continue matching
// this means if the starts of two ops are the same then they are equal until the last char

STATIC const char *const tok_enc =
    "()[]{},:;@~" // singles
    "<e=c<e="     // < <= << <<=
    ">e=c>e="     // > >= >> >>=
    "*e=c*e="     // * *= ** **=
    "+e="         // + +=
    "-e=e>"       // - -= ->
    "&e="         // & &=
    "|e="         // | |=
    "/e=c/e="     // / /= // //=
    "%e="         // % %=
    "^e="         // ^ ^=
    "=e="         // = ==
    "!.";         // start of special cases: != . ...

// TODO static assert that number of tokens is less than 256 so we can safely make this table with byte sized entries
STATIC const uint8_t tok_enc_kind[] = {
    MP_TOKEN_DEL_PAREN_OPEN, MP_TOKEN_DEL_PAREN_CLOSE,
    MP_TOKEN_DEL_BRACKET_OPEN, MP_TOKEN_DEL_BRACKET_CLOSE,
    MP_TOKEN_DEL_BRACE_OPEN, MP_TOKEN_DEL_BRACE_CLOSE,
    MP_TOKEN_DEL_COMMA, MP_TOKEN_DEL_COLON, MP_TOKEN_DEL_SEMICOLON, MP_TOKEN_DEL_AT, MP_TOKEN_OP_TILDE,

    MP_TOKEN_OP_LESS, MP_TOKEN_OP_LESS_EQUAL, MP_TOKEN_OP_DBL_LESS, MP_TOKEN_DEL_DBL_LESS_EQUAL,
    MP_TOKEN_OP_MORE, MP_TOKEN_OP_MORE_EQUAL, MP_TOKEN_OP_DBL_MORE, MP_TOKEN_DEL_DBL_MORE_EQUAL,
    MP_TOKEN_OP_STAR, MP_TOKEN_DEL_STAR_EQUAL, MP_TOKEN_OP_DBL_STAR, MP_TOKEN_DEL_DBL_STAR_EQUAL,
    MP_TOKEN_OP_PLUS, MP_TOKEN_DEL_PLUS_EQUAL,
    MP_TOKEN_OP_MINUS, MP_TOKEN_DEL_MINUS_EQUAL, MP_TOKEN_DEL_MINUS_MORE,
    MP_TOKEN_OP_AMPERSAND, MP_TOKEN_DEL_AMPERSAND_EQUAL,
    MP_TOKEN_OP_PIPE, MP_TOKEN_DEL_PIPE_EQUAL,
    MP_TOKEN_OP_SLASH, MP_TOKEN_DEL_SLASH_EQUAL, MP_TOKEN_OP_DBL_SLASH, MP_TOKEN_DEL_DBL_SLASH_EQUAL,
    MP_TOKEN_OP_PERCENT, MP_TOKEN_DEL_PERCENT_EQUAL,
    MP_TOKEN_OP_CARET, MP_TOKEN_DEL_CARET_EQUAL,
    MP_TOKEN_DEL_EQUAL, MP_TOKEN_OP_DBL_EQUAL,
};

// must have the same order as enum in lexer.h
// must be sorted according to strcmp
STATIC const char *const tok_kw[] = {
    "False",
    "None",
    "True",
    "__debug__",
    "and",
    "as",
    "assert",
    #if MICROPY_PY_ASYNC_AWAIT
    "async",
    "await",
    #endif
    "break",
    "class",
    "continue",
    "def",
    "del",
    "elif",
    "else",
    "except",
    "finally",
    "for",
    "from",
    "global",
    "if",
    "import",
    "in",
    "is",
    "lambda",
    "nonlocal",
    "not",
    "or",
    "pass",
    "raise",
    "return",
    "try",
    "while",
    "with",
    "yield",
};
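
// The strcmp ordering explains why "__debug__" sits between "True" and "and":
// in ASCII, uppercase letters ('T' == 0x54) sort before '_' (0x5F), which in
// turn sorts before lowercase letters ('a' == 0x61).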

// This is called with CUR_CHAR() before first hex digit, and should return with
// it pointing to last hex digit
// num_digits must be greater than zero
STATIC bool get_hex(mp_lexer_t *lex, size_t num_digits, mp_uint_t *result) {
    mp_uint_t num = 0;
    while (num_digits-- != 0) {
        next_char(lex);
        unichar c = CUR_CHAR(lex);
        if (!unichar_isxdigit(c)) {
            return false;
        }
        num = (num << 4) + unichar_xdigit_value(c);
    }
    *result = num;
    return true;
}
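
// Worked example: for the escape \x41, get_hex() is called with num_digits=2
// and accumulates num = (0 << 4) + 0x4 = 0x4, then (0x4 << 4) + 0x1 = 0x41,
// i.e. 'A'. \uXXXX and \UXXXXXXXX escapes reuse the same routine with 4 and
// 8 digits respectively.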

STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw, bool is_fstring) {
    // get first quoting character
    char quote_char = '\'';
    if (is_char(lex, '\"')) {
        quote_char = '\"';
    }
    next_char(lex);

    // work out if it's a single or triple quoted literal
    size_t num_quotes;
    if (is_char_and(lex, quote_char, quote_char)) {
        // triple quotes
        next_char(lex);
        next_char(lex);
        num_quotes = 3;
    } else {
        // single quotes
        num_quotes = 1;
    }

    size_t n_closing = 0;
    #if MICROPY_COMP_FSTRING_LITERAL
    bool in_expression = false;
    bool expression_eat = true;
    #endif

    while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
        if (is_char(lex, quote_char)) {
            n_closing += 1;
            vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        } else {
            n_closing = 0;
            #if MICROPY_COMP_FSTRING_LITERAL
            if (is_fstring && is_char(lex, '{')) {
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                in_expression = !in_expression;
                expression_eat = in_expression;

                if (lex->vstr_postfix.len == 0) {
                    vstr_add_str(&lex->vstr_postfix, ".format(");
                }

                next_char(lex);
                continue;
            }

            if (is_fstring && is_char(lex, '}')) {
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));

                if (in_expression) {
                    in_expression = false;
                    vstr_add_char(&lex->vstr_postfix, ',');
                }

                next_char(lex);
                continue;
            }

            if (in_expression) {
                // throw errors for illegal chars inside f-string expressions
                if (is_char(lex, '#')) {
                    lex->tok_kind = MP_TOKEN_FSTRING_COMMENT;
                    return;
                } else if (is_char(lex, '\\')) {
                    lex->tok_kind = MP_TOKEN_FSTRING_BACKSLASH;
                    return;
                } else if (is_char(lex, ':')) {
                    expression_eat = false;
                }

                unichar c = CUR_CHAR(lex);
                if (expression_eat) {
                    vstr_add_char(&lex->vstr_postfix, c);
                } else {
                    vstr_add_char(&lex->vstr, c);
                }

                next_char(lex);
                continue;
            }
            #endif
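
            // Routing sketch for f-string expressions: text inside {...} goes
            // to vstr_postfix (becoming a .format() argument), while anything
            // after a ':' format spec stays in vstr. For example,
            // f'str<{y:^4}>' keeps 'str<{:^4}>' in vstr and appends "y," to
            // the ".format(" postfix.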
|
py: Implement partial PEP-498 (f-string) support
This implements (most of) the PEP-498 spec for f-strings, with two
exceptions:
- raw f-strings (`fr` or `rf` prefixes) raise `NotImplementedError`
- one special corner case does not function as specified in the PEP
(more on that in a moment)
This is implemented in the core as a syntax translation, brute-forcing
all f-strings to run through `String.format`. For example, the statement
`x='world'; print(f'hello {x}')` gets translated *at a syntax level*
(injected into the lexer) to `x='world'; print('hello {}'.format(x))`.
While this may lead to weird column results in tracebacks, it seemed
like the fastest, most efficient, and *likely* most RAM-friendly option,
despite being implemented under the hood with a completely separate
`vstr_t`.
Since [string concatenation of adjacent literals is implemented in the
lexer](https://github.com/micropython/micropython/commit/534b7c368dc2af7720f3aaed0c936ef46d773957),
two side effects emerge:
- All strings with at least one f-string portion are concatenated into a
single literal which *must* be run through `String.format()` wholesale,
and:
- Concatenation of a raw string with interpolation characters with an
f-string will cause `IndexError`/`KeyError`, which is both different
from CPython *and* different from the corner case mentioned in the PEP
(which gave an example of the following:)
```python
x = 10
y = 'hi'
assert ('a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e') == 'ab10{c}str< hi >de'
```
The above-linked commit detailed a pretty solid case for leaving string
concatenation in the lexer rather than putting it in the parser, and
undoing that decision would likely be disproportionately costly on
resources for the sake of a probably-low-impact corner case. An
alternative to become complaint with this corner case of the PEP would
be to revert to string concatenation in the parser *only when an
f-string is part of concatenation*, though I've done no investigation on
the difficulty or costs of doing this.
A decent set of tests is included. I've manually tested this on the
`unix` port on Linux and on a Feather M4 Express (`atmel-samd`) and
things seem sane.
2019-08-11 00:27:20 -04:00
|
|
|
|
2017-02-16 20:12:40 -05:00
|
|
|
if (is_char(lex, '\\')) {
|
|
|
|
next_char(lex);
|
|
|
|
unichar c = CUR_CHAR(lex);
|
                if (is_raw) {
                    // raw strings allow escaping of quotes, but the backslash is also emitted
                    vstr_add_char(&lex->vstr, '\\');
                } else {
                    switch (c) {
                        // note: "c" can never be MP_LEXER_EOF because next_char
                        // always inserts a newline at the end of the input stream
                        case '\n': c = MP_LEXER_EOF; break; // backslash escape the newline, just ignore it
                        case '\\': break;
                        case '\'': break;
                        case '"': break;
                        case 'a': c = 0x07; break;
                        case 'b': c = 0x08; break;
                        case 't': c = 0x09; break;
                        case 'n': c = 0x0a; break;
                        case 'v': c = 0x0b; break;
                        case 'f': c = 0x0c; break;
                        case 'r': c = 0x0d; break;
                        case 'u':
                        case 'U':
                            if (lex->tok_kind == MP_TOKEN_BYTES) {
                                // b'\u1234' == b'\\u1234'
                                vstr_add_char(&lex->vstr, '\\');
                                break;
                            }
                            // Otherwise fall through.
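                        // hex escapes consume a fixed number of digits:
                        // \xhh takes 2, \uxxxx takes 4, \Uxxxxxxxx takes 8
                        // (the get_hex() length argument below)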
                        case 'x':
                        {
                            mp_uint_t num = 0;
                            if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
                                // not enough hex chars for escape sequence
                                lex->tok_kind = MP_TOKEN_INVALID;
                            }
                            c = num;
                            break;
                        }
                        case 'N':
                            // Supporting '\N{LATIN SMALL LETTER A}' == 'a' would require keeping the
                            // entire Unicode name table in the core. As of Unicode 6.3.0, that's nearly
                            // 3MB of text; even gzip-compressed and with minimal structure, it'll take
                            // roughly half a meg of storage. This form of Unicode escape may be added
                            // later on, but it's definitely not a priority right now. -- CJA 20140607
                            mp_raise_NotImplementedError(translate("unicode name escapes"));
                            break;
                        default:
                            if (c >= '0' && c <= '7') {
                                // Octal sequence, 1-3 chars
                                size_t digits = 3;
                                mp_uint_t num = c - '0';
                                while (is_following_odigit(lex) && --digits != 0) {
                                    next_char(lex);
                                    num = num * 8 + (CUR_CHAR(lex) - '0');
                                }
                                c = num;
                            } else {
                                // unrecognised escape character; CPython lets this through verbatim as '\' and then the character
                                vstr_add_char(&lex->vstr, '\\');
                            }
                            break;
                    }
                }
                if (c != MP_LEXER_EOF) {
                    if (MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) {
                        if (c < 0x110000 && lex->tok_kind == MP_TOKEN_STRING) {
                            vstr_add_char(&lex->vstr, c);
                        } else if (c < 0x100 && lex->tok_kind == MP_TOKEN_BYTES) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            // unicode character out of range
                            // this raises a generic SyntaxError; could provide more info
                            lex->tok_kind = MP_TOKEN_INVALID;
                        }
                    } else {
                        // without unicode everything is just added as an 8-bit byte
                        if (c < 0x100) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            // 8-bit character out of range
                            // this raises a generic SyntaxError; could provide more info
                            lex->tok_kind = MP_TOKEN_INVALID;
                        }
                    }
                }
            } else {
                // Add the "character" as a byte so that we remain 8-bit clean.
                // This way, strings are parsed correctly whether or not they contain utf-8 chars.
                vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
            }
        }
        next_char(lex);
    }

    // check we got the required end quotes
    if (n_closing < num_quotes) {
        lex->tok_kind = MP_TOKEN_LONELY_STRING_OPEN;
    }

    // cut off the end quotes from the token text
    vstr_cut_tail_bytes(&lex->vstr, n_closing);
}

STATIC bool skip_whitespace(mp_lexer_t *lex, bool stop_at_newline) {
    bool had_physical_newline = false;
    while (!is_end(lex)) {
        if (is_physical_newline(lex)) {
            if (stop_at_newline && lex->nested_bracket_level == 0) {
                break;
            }
            had_physical_newline = true;
            next_char(lex);
        } else if (is_whitespace(lex)) {
            next_char(lex);
        } else if (is_char(lex, '#')) {
            next_char(lex);
            while (!is_end(lex) && !is_physical_newline(lex)) {
                next_char(lex);
            }
            // had_physical_newline will be set on next loop
        } else if (is_char_and(lex, '\\', '\n')) {
            // line-continuation, so don't set had_physical_newline
            next_char(lex);
            next_char(lex);
        } else {
            break;
        }
    }
    return had_physical_newline;
}

void mp_lexer_to_next(mp_lexer_t *lex) {
    #if MICROPY_COMP_FSTRING_LITERAL
    if (lex->vstr_postfix.len && !lex->vstr_postfix_processing) {
        // end format call injection
        vstr_add_char(&lex->vstr_postfix, ')');
        lex->vstr_postfix_processing = true;
        swap_char_banks(lex);
    }
    #endif

    // start new token text
    vstr_reset(&lex->vstr);

    // skip white space and comments
    bool had_physical_newline = skip_whitespace(lex, false);

    // set token source information
    lex->tok_line = lex->line;
    lex->tok_column = lex->column;

    if (lex->emit_dent < 0) {
        lex->tok_kind = MP_TOKEN_DEDENT;
        lex->emit_dent += 1;

    } else if (lex->emit_dent > 0) {
        lex->tok_kind = MP_TOKEN_INDENT;
        lex->emit_dent -= 1;

    } else if (had_physical_newline && lex->nested_bracket_level == 0) {
        lex->tok_kind = MP_TOKEN_NEWLINE;

        size_t num_spaces = lex->column - 1;
        if (num_spaces == indent_top(lex)) {
        } else if (num_spaces > indent_top(lex)) {
            indent_push(lex, num_spaces);
            lex->emit_dent += 1;
        } else {
            while (num_spaces < indent_top(lex)) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
            if (num_spaces != indent_top(lex)) {
                lex->tok_kind = MP_TOKEN_DEDENT_MISMATCH;
            }
        }

    } else if (is_end(lex)) {
        lex->tok_kind = MP_TOKEN_END;

    } else if (is_string_or_bytes(lex)) {
        // a string or bytes literal

        // Python requires adjacent string/bytes literals to be automatically
        // concatenated. We do it here in the tokeniser to make efficient use of RAM,
        // because then the lexer's vstr can be used to accumulate the string literal,
        // in contrast to creating a parse tree of strings and then joining them later
        // in the compiler. It's also more compact in code size to do it here.
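        // For example, the source  'abc' "def"  is accumulated by this branch
        // into a single MP_TOKEN_STRING whose text is "abcdef".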

        // MP_TOKEN_END is used to indicate that this is the first string token
        lex->tok_kind = MP_TOKEN_END;

        #if MICROPY_COMP_FSTRING_LITERAL
        bool saw_normal = false, saw_fstring = false;
        #endif

        // Loop to accumulate string/bytes literals
        do {
            // parse type codes
            bool is_raw = false;
            #if MICROPY_COMP_FSTRING_LITERAL
            bool is_fstring = false;
            #else
            const bool is_fstring = false;
            #endif
            mp_token_kind_t kind = MP_TOKEN_STRING;
            int n_char = 0;
            if (is_char(lex, 'u')) {
                n_char = 1;
            } else if (is_char(lex, 'b')) {
                kind = MP_TOKEN_BYTES;
                n_char = 1;
                if (is_char_following(lex, 'r')) {
                    is_raw = true;
                    n_char = 2;
                }
            } else if (is_char(lex, 'r')) {
                is_raw = true;
                n_char = 1;
                if (is_char_following(lex, 'b')) {
                    kind = MP_TOKEN_BYTES;
                    n_char = 2;
                }
                #if MICROPY_COMP_FSTRING_LITERAL
                if (is_char_following(lex, 'f')) {
                    lex->tok_kind = MP_TOKEN_FSTRING_RAW;
                    break;
                }
            } else if (is_char(lex, 'f')) {
                if (is_char_following(lex, 'r')) {
                    lex->tok_kind = MP_TOKEN_FSTRING_RAW;
                    break;
                }
                n_char = 1;
                is_fstring = true;
                #endif
            }

            #if MICROPY_COMP_FSTRING_LITERAL
            if (is_fstring) {
                saw_fstring = true;
            } else {
                saw_normal = true;
            }

            if (saw_fstring && saw_normal) {
                // Can't concatenate f-string with normal string
                break;
            }
            #endif

            // Set or check token kind
            if (lex->tok_kind == MP_TOKEN_END) {
                lex->tok_kind = kind;
            } else if (lex->tok_kind != kind) {
                // Can't concatenate string with bytes
                break;
            }

            // Skip any type code characters
            if (n_char != 0) {
                next_char(lex);
                if (n_char == 2) {
                    next_char(lex);
                }
            }

            // Parse the literal
            parse_string_literal(lex, is_raw, is_fstring);

            // Skip whitespace so we can check if there's another string following
            skip_whitespace(lex, true);

        } while (is_string_or_bytes(lex));

    } else if (is_head_of_identifier(lex)) {
        lex->tok_kind = MP_TOKEN_NAME;

        // get first char (add as byte to remain 8-bit clean and support utf-8)
        vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex) && is_tail_of_identifier(lex)) {
            vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
            next_char(lex);
        }

        // Check if the name is a keyword.
        // We also check for __debug__ here and convert it to its value. This is
        // so the parser gives a syntax error on, eg, x.__debug__. Otherwise, we
        // need to check for this special token in many places in the compiler.
        const char *s = vstr_null_terminated_str(&lex->vstr);
        for (size_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
            int cmp = strcmp(s, tok_kw[i]);
            if (cmp == 0) {
                lex->tok_kind = MP_TOKEN_KW_FALSE + i;
                if (lex->tok_kind == MP_TOKEN_KW___DEBUG__) {
                    lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
                }
                break;
            } else if (cmp < 0) {
                // Table is sorted and comparison was less-than, so stop searching
                break;
            }
        }

    } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
        bool forced_integer = false;
        if (is_char(lex, '.')) {
            lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
        } else {
            lex->tok_kind = MP_TOKEN_INTEGER;
            if (is_char(lex, '0') && is_following_base_char(lex)) {
                forced_integer = true;
            }
        }

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex)) {
            if (!forced_integer && is_char_or(lex, 'e', 'E')) {
                lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
                vstr_add_char(&lex->vstr, 'e');
                next_char(lex);
                if (is_char(lex, '+') || is_char(lex, '-')) {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                    next_char(lex);
                }
            } else if (is_letter(lex) || is_digit(lex) || is_char(lex, '.')) {
                if (is_char_or3(lex, '.', 'j', 'J')) {
                    lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
                }
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                next_char(lex);
            } else if (is_char(lex, '_')) {
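                // underscores in numeric literals (PEP 515 digit grouping)
                // are consumed here without being added to the token text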
                next_char(lex);
            } else {
                break;
            }
        }

    } else {
        // search for encoded delimiter or operator

        const char *t = tok_enc;
        size_t tok_enc_index = 0;
        for (; *t != 0 && !is_char(lex, *t); t += 1) {
            if (*t == 'e' || *t == 'c') {
                t += 1;
            }
            tok_enc_index += 1;
        }

        next_char(lex);

        if (*t == 0) {
            // didn't match any delimiter or operator characters
            lex->tok_kind = MP_TOKEN_INVALID;

        } else if (*t == '!') {
            // "!=" is a special case because "!" is not a valid operator
            if (is_char(lex, '=')) {
                next_char(lex);
                lex->tok_kind = MP_TOKEN_OP_NOT_EQUAL;
            } else {
                lex->tok_kind = MP_TOKEN_INVALID;
            }

        } else if (*t == '.') {
            // "." and "..." are special cases because ".." is not a valid operator
            if (is_char_and(lex, '.', '.')) {
                next_char(lex);
                next_char(lex);
                lex->tok_kind = MP_TOKEN_ELLIPSIS;
            } else {
                lex->tok_kind = MP_TOKEN_DEL_PERIOD;
            }

        } else {
            // matched a delimiter or operator character

            // get the maximum characters for a valid token
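            // In tok_enc, each possible extension of the matched character is
            // encoded as a marker/char pair: 'e' means "if the next input char
            // matches, consume it and the token is complete" (on a mismatch the
            // following pair is tried); 'c' means "if it matches, consume it and
            // keep scanning the following pairs for a longer token; on a mismatch
            // stop here". By this scheme an entry like "<e=c<e=" would encode
            // the tokens <, <=, << and <<=.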
            t += 1;
            size_t t_index = tok_enc_index;
            while (*t == 'c' || *t == 'e') {
                t_index += 1;
                if (is_char(lex, t[1])) {
                    next_char(lex);
                    tok_enc_index = t_index;
                    if (*t == 'e') {
                        break;
                    }
                } else if (*t == 'c') {
                    break;
                }
                t += 2;
            }

            // set token kind
            lex->tok_kind = tok_enc_kind[tok_enc_index];

            // compute bracket level for implicit line joining
            if (lex->tok_kind == MP_TOKEN_DEL_PAREN_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACKET_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACE_OPEN) {
                lex->nested_bracket_level += 1;
            } else if (lex->tok_kind == MP_TOKEN_DEL_PAREN_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACKET_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACE_CLOSE) {
                lex->nested_bracket_level -= 1;
            }
        }
    }
}

mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader) {
    mp_lexer_t *lex = m_new_obj(mp_lexer_t);

    lex->source_name = src_name;
    lex->reader = reader;
    lex->line = 1;
    lex->column = (size_t)-2; // account for 3 dummy bytes
    lex->emit_dent = 0;
    lex->nested_bracket_level = 0;
    lex->alloc_indent_level = MICROPY_ALLOC_LEXER_INDENT_INIT;
    lex->num_indent_level = 1;
    lex->indent_level = m_new(uint16_t, lex->alloc_indent_level);
    vstr_init(&lex->vstr, 32);
    #if MICROPY_COMP_FSTRING_LITERAL
    vstr_init(&lex->vstr_postfix, 0);
    #endif

    // store sentinel for first indentation level
    lex->indent_level[0] = 0;

    // load lexer with start of file, advancing lex->column to 1
    // start with dummy bytes and use next_char() for proper EOL/EOF handling
    lex->chr0 = lex->chr1 = lex->chr2 = 0;
    next_char(lex);
    next_char(lex);
    next_char(lex);

    // preload first token
    mp_lexer_to_next(lex);

    // Check that the first token is in the first column. If it's not then we
    // convert the token kind to INDENT so that the parser gives a syntax error.
    if (lex->tok_column != 1) {
        lex->tok_kind = MP_TOKEN_INDENT;
    }

    return lex;
}

mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len) {
    mp_reader_t reader;
    mp_reader_new_mem(&reader, (const byte *)str, len, free_len);
    return mp_lexer_new(src_name, reader);
}
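
// Example use of the string-based constructor (a sketch for illustration;
// assumes the compiler is enabled and a qstr for the source name is made
// via qstr_from_str()):
//
//     const char *src = "print(1 + 2)\n";
//     mp_lexer_t *lex = mp_lexer_new_from_str_len(qstr_from_str("<stdin>"),
//         src, strlen(src), 0);
//     while (lex->tok_kind != MP_TOKEN_END) {
//         mp_lexer_to_next(lex); // advance; inspect lex->tok_kind and lex->vstr
//     }
//     mp_lexer_free(lex);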

#if MICROPY_READER_POSIX || MICROPY_READER_VFS

mp_lexer_t *mp_lexer_new_from_file(const char *filename) {
    mp_reader_t reader;
    mp_reader_new_file(&reader, filename);
    return mp_lexer_new(qstr_from_str(filename), reader);
}

#if MICROPY_HELPER_LEXER_UNIX

mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd) {
    mp_reader_t reader;
    mp_reader_new_file_from_fd(&reader, fd, close_fd);
    return mp_lexer_new(filename, reader);
}

#endif

#endif

void mp_lexer_free(mp_lexer_t *lex) {
    if (lex) {
        lex->reader.close(lex->reader.data);
        vstr_clear(&lex->vstr);
        m_del(uint16_t, lex->indent_level, lex->alloc_indent_level);
        m_del_obj(mp_lexer_t, lex);
    }
}

#if 0
// This function is used to print the current token and should only be
// needed to debug the lexer, so it's not available via a config option.
void mp_lexer_show_token(const mp_lexer_t *lex) {
    printf("(" UINT_FMT ":" UINT_FMT ") kind:%u str:%p len:%zu", lex->tok_line, lex->tok_column, lex->tok_kind, lex->vstr.buf, lex->vstr.len);
    if (lex->vstr.len > 0) {
        const byte *i = (const byte *)lex->vstr.buf;
        const byte *j = (const byte *)i + lex->vstr.len;
        printf(" ");
        while (i < j) {
            unichar c = utf8_get_char(i);
            i = utf8_next_char(i);
            if (unichar_isprint(c)) {
                printf("%c", (int)c);
            } else {
                printf("?");
            }
        }
    }
    printf("\n");
}
#endif

#endif // MICROPY_ENABLE_COMPILER