// circuitpython/py/objgenerator.c

/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2013-2019 Damien P. George
* Copyright (c) 2014-2017 Paul Sokolovsky
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdlib.h>
#include <assert.h>
#include "py/runtime.h"
#include "py/bc.h"
#include "py/objstr.h"
#include "py/objgenerator.h"
#include "py/objfun.h"
#include "py/stackctrl.h"
// Instance of GeneratorExit exception - needed by generator.close()
// CIRCUITPY: https://github.com/adafruit/circuitpython/pull/7069 fix
#if MICROPY_CONST_GENERATOREXIT_OBJ
const mp_obj_exception_t mp_const_GeneratorExit_obj = {{&mp_type_GeneratorExit}, (mp_obj_tuple_t *)&mp_const_empty_tuple_obj, (mp_obj_traceback_t *)&mp_const_empty_traceback_obj};
#else
static mp_obj_exception_t mp_static_GeneratorExit_obj;
#endif
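// Note: when the instance cannot be const (e.g. with MICROPY_CPYTHON_EXCEPTION_CHAIN,
// which writes chaining fields into the exception at raise time), a static copy is
// used instead and is (re)initialized in generatorexit() below before each use.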
/******************************************************************************/
/* generator wrapper */
typedef struct _mp_obj_gen_instance_t {
    mp_obj_base_t base;
    // mp_const_none: Not-running, no exception.
    // MP_OBJ_NULL: Running, no exception.
    // other: Not running, pending exception.
    mp_obj_t pend_exc;
    mp_code_state_t code_state;
} mp_obj_gen_instance_t;
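// pend_exc thus doubles as the reentrancy guard (MP_OBJ_NULL while the generator is
// running) and as the slot used by pend_throw() to stash an exception for the next resume.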
STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    // A generator or coroutine function is just a bytecode function
    // with type mp_type_gen_wrap or mp_type_coro_wrap.
    mp_obj_fun_bc_t *self_fun = MP_OBJ_TO_PTR(self_in);
    // bytecode prelude: get state size and exception stack size
    const uint8_t *ip = self_fun->bytecode;
    MP_BC_PRELUDE_SIG_DECODE(ip);

    // allocate the generator or coroutine object, with room for local stack and exception stack
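    // (The trailing variable-size area of the object holds code_state.state[]: n_state
    // object slots for locals and the Python value stack, followed by n_exc_stack
    // mp_exc_stack_t entries, with both sizes taken from the prelude just decoded.)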
    mp_obj_gen_instance_t *o = mp_obj_malloc_var(mp_obj_gen_instance_t, byte,
        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t),
        #if MICROPY_PY_ASYNC_AWAIT
        self_fun->base.type == &mp_type_gen_wrap ? &mp_type_gen_instance : &mp_type_coro_instance
        #else
        &mp_type_gen_instance
        #endif
        );
    o->pend_exc = mp_const_none;
    o->code_state.fun_bc = self_fun;
    o->code_state.n_state = n_state;
    mp_setup_code_state(&o->code_state, n_args, n_kw, args);
    return MP_OBJ_FROM_PTR(o);
}
#if MICROPY_PY_FUNCTION_ATTRS
#define GEN_WRAP_TYPE_ATTR attr, mp_obj_fun_bc_attr,
#else
#define GEN_WRAP_TYPE_ATTR
#endif
MP_DEFINE_CONST_OBJ_TYPE(
    mp_type_gen_wrap,
    MP_QSTR_generator,
    MP_TYPE_FLAG_BINDS_SELF,
    GEN_WRAP_TYPE_ATTR
    call, gen_wrap_call,
    unary_op, mp_generic_unary_op
    );
#if MICROPY_PY_ASYNC_AWAIT
MP_DEFINE_CONST_OBJ_TYPE(
    mp_type_coro_wrap,
    MP_QSTR_coroutine,
    MP_TYPE_FLAG_BINDS_SELF,
    GEN_WRAP_TYPE_ATTR
    call, gen_wrap_call,
    unary_op, mp_generic_unary_op
    );
#endif
/******************************************************************************/
// native generator wrapper
#if MICROPY_EMIT_NATIVE
// Based on mp_obj_gen_instance_t.
typedef struct _mp_obj_gen_instance_native_t {
    mp_obj_base_t base;
    mp_obj_t pend_exc;
    mp_code_state_native_t code_state;
} mp_obj_gen_instance_native_t;
STATIC mp_obj_t native_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    // The state for a native generator function is held in the same struct as a bytecode function
    mp_obj_fun_bc_t *self_fun = MP_OBJ_TO_PTR(self_in);
    // Determine start of prelude.
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wcast-align"
    uintptr_t prelude_ptr_index = ((uintptr_t *)self_fun->bytecode)[0];
    #pragma GCC diagnostic pop
    const uint8_t *prelude_ptr;
    if (prelude_ptr_index == 0) {
        prelude_ptr = (void *)self_fun->child_table;
    } else {
        prelude_ptr = (void *)self_fun->child_table[prelude_ptr_index];
    }
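    // (Word 0 of a native function's "bytecode" blob is the prelude index: 0 means
    // child_table itself is the prelude pointer, otherwise the prelude pointer is the
    // entry at that index of child_table, conventionally placed after the children.)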
    // Extract n_state from the prelude.
    const uint8_t *ip = prelude_ptr;
    MP_BC_PRELUDE_SIG_DECODE(ip);

    // Allocate the generator object, with room for local stack (exception stack not needed).
    mp_obj_gen_instance_native_t *o = mp_obj_malloc_var(mp_obj_gen_instance_native_t, byte, n_state * sizeof(mp_obj_t),
        #if MICROPY_PY_ASYNC_AWAIT
        (self_fun->base.type == &mp_type_native_gen_wrap) ? &mp_type_gen_instance : &mp_type_coro_instance
        #else
        &mp_type_gen_instance
        #endif
        );

    // Parse the input arguments and set up the code state
    o->pend_exc = mp_const_none;
    o->code_state.fun_bc = self_fun;
    o->code_state.ip = prelude_ptr;
    o->code_state.n_state = n_state;
    o->code_state.sp = &o->code_state.state[0] - 1;
    mp_setup_code_state_native(&o->code_state, n_args, n_kw, args);

    // Indicate we are a native function, which doesn't use this variable
    o->code_state.exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_SENTINEL;
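    // (mp_obj_gen_resume later uses this sentinel to tell a native generator apart
    // from a bytecode one.)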
    // Prepare the generator instance for execution
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wcast-align"
    uintptr_t start_offset = ((uintptr_t *)self_fun->bytecode)[1];
    #pragma GCC diagnostic pop
    o->code_state.ip = MICROPY_MAKE_POINTER_CALLABLE((void *)(self_fun->bytecode + start_offset));
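    // (Word 1 of the blob is the byte offset of the generator's machine-code entry
    // point; MICROPY_MAKE_POINTER_CALLABLE adjusts the address where the port
    // requires it, e.g. setting the Thumb bit on ARM.)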
    return MP_OBJ_FROM_PTR(o);
}
#if MICROPY_PY_FUNCTION_ATTRS
#define NATIVE_GEN_WRAP_TYPE_ATTR attr, mp_obj_fun_bc_attr,
#else
#define NATIVE_GEN_WRAP_TYPE_ATTR
#endif
MP_DEFINE_CONST_OBJ_TYPE(
    mp_type_native_gen_wrap,
    MP_QSTR_generator,
    MP_TYPE_FLAG_BINDS_SELF,
    call, native_gen_wrap_call,
    NATIVE_GEN_WRAP_TYPE_ATTR
    unary_op, mp_generic_unary_op
    );
#if MICROPY_PY_ASYNC_AWAIT
MP_DEFINE_CONST_OBJ_TYPE(
    mp_type_native_coro_wrap,
    MP_QSTR_coroutine,
    MP_TYPE_FLAG_BINDS_SELF,
    GEN_WRAP_TYPE_ATTR
    call, native_gen_wrap_call,
    unary_op, mp_generic_unary_op
    );
#endif
#endif // MICROPY_EMIT_NATIVE
/******************************************************************************/
/* generator instance */
STATIC void gen_instance_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
    (void)kind;
    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
    mp_printf(print, "<generator object '%q' at %p>", mp_obj_fun_get_name(MP_OBJ_FROM_PTR(self->code_state.fun_bc)), self);
}
// CIRCUITPY
#if MICROPY_PY_ASYNC_AWAIT
STATIC void coro_instance_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
    (void)kind;
    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
    mp_printf(print, "<coroutine object '%q' at %p>", mp_obj_fun_get_name(MP_OBJ_FROM_PTR(self->code_state.fun_bc)), self);
}
#endif
mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) {
    MP_STACK_CHECK();
    // CIRCUITPY
    // note that self may have as its type either gen or coro,
    // both of which are stored as an mp_obj_gen_instance_t.
    mp_check_self(
        mp_obj_is_type(self_in, &mp_type_gen_instance)
        #if MICROPY_PY_ASYNC_AWAIT
        || mp_obj_is_type(self_in, &mp_type_coro_instance)
        #endif
        );
    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
    if (self->code_state.ip == 0) {
        // Trying to resume an already stopped generator.
        // This is an optimised "raise StopIteration(None)".
        *ret_val = mp_const_none;
        return MP_VM_RETURN_NORMAL;
    }
    // Ensure the generator cannot be reentered during execution
    if (self->pend_exc == MP_OBJ_NULL) {
        mp_raise_ValueError(MP_ERROR_TEXT("generator already executing"));
    }

    #if MICROPY_PY_GENERATOR_PEND_THROW
    // If exception is pending (set using .pend_throw()), process it now.
    if (self->pend_exc != mp_const_none) {
        throw_value = self->pend_exc;
    }
    #endif
    // If the generator is started, allow sending a value.
    void *state_start = self->code_state.state - 1;
    #if MICROPY_EMIT_NATIVE
    if (self->code_state.exc_sp_idx == MP_CODE_STATE_EXC_SP_IDX_SENTINEL) {
        state_start = ((mp_obj_gen_instance_native_t *)self)->code_state.state - 1;
    }
    #endif
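    // (sp still at its initial value, one slot below state[0], means the generator has
    // not been started yet, so only None may be sent; the native code state struct has
    // a different layout, hence the separate computation of state_start above.)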
    if (self->code_state.sp == state_start) {
        if (send_value != mp_const_none) {
            mp_raise_TypeError(MP_ERROR_TEXT("can't send non-None value to a just-started generator"));
        }
    } else {
        *self->code_state.sp = send_value;
    }

    // Mark as running
    self->pend_exc = MP_OBJ_NULL;

    // Set up the correct globals context for the generator and execute it
    self->code_state.old_globals = mp_globals_get();
    mp_globals_set(self->code_state.fun_bc->context->module.globals);

    mp_vm_return_kind_t ret_kind;

    #if MICROPY_EMIT_NATIVE
    if (self->code_state.exc_sp_idx == MP_CODE_STATE_EXC_SP_IDX_SENTINEL) {
        // A native generator, with entry point 2 words into the "bytecode" pointer
        typedef uintptr_t (*mp_fun_native_gen_t)(void *, mp_obj_t);
        mp_fun_native_gen_t fun = MICROPY_MAKE_POINTER_CALLABLE((const void *)(self->code_state.fun_bc->bytecode + 2 * sizeof(uintptr_t)));
        ret_kind = fun((void *)&self->code_state, throw_value);
    } else
    #endif
    {
        // A bytecode generator
        ret_kind = mp_execute_bytecode(&self->code_state, throw_value);
    }

    mp_globals_set(self->code_state.old_globals);

    // Mark as not running
    self->pend_exc = mp_const_none;
    switch (ret_kind) {
        case MP_VM_RETURN_NORMAL:
        default:
            // Explicitly mark generator as completed. If we don't do this,
            // subsequent next() may re-execute statements after last yield
            // again and again, leading to side effects.
            self->code_state.ip = 0;
            // This is an optimised "raise StopIteration(*ret_val)".
            *ret_val = *self->code_state.sp;
            break;

        case MP_VM_RETURN_YIELD:
            *ret_val = *self->code_state.sp;
            #if MICROPY_PY_GENERATOR_PEND_THROW
            *self->code_state.sp = mp_const_none;
            #endif
            break;

        case MP_VM_RETURN_EXCEPTION: {
            self->code_state.ip = 0;
            #if MICROPY_EMIT_NATIVE
            if (self->code_state.exc_sp_idx == MP_CODE_STATE_EXC_SP_IDX_SENTINEL) {
                *ret_val = ((mp_obj_gen_instance_native_t *)self)->code_state.state[0];
            } else
            #endif
            {
                *ret_val = self->code_state.state[0];
            }
            // PEP479: if StopIteration is raised inside a generator it is replaced with RuntimeError
            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(*ret_val)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
                *ret_val = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("generator raised StopIteration"));
            }
            break;
        }
    }

    return ret_kind;
}
STATIC mp_obj_t gen_resume_and_raise(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, bool raise_stop_iteration) {
    mp_obj_t ret;
    switch (mp_obj_gen_resume(self_in, send_value, throw_value, &ret)) {
        case MP_VM_RETURN_NORMAL:
        default:
            // A normal return is a StopIteration, either raise it or return
            // MP_OBJ_STOP_ITERATION as an optimisation.
            if (ret == mp_const_none) {
                ret = MP_OBJ_NULL;
            }
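            // (MP_OBJ_NULL here represents a StopIteration carrying no value.)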
            if (raise_stop_iteration) {
                mp_raise_StopIteration(ret);
            } else {
                return mp_make_stop_iteration(ret);
            }

        case MP_VM_RETURN_YIELD:
            return ret;

        case MP_VM_RETURN_EXCEPTION:
            nlr_raise(ret);
    }
}
STATIC mp_obj_t gen_instance_iternext(mp_obj_t self_in) {
    return gen_resume_and_raise(self_in, mp_const_none, MP_OBJ_NULL, false);
}
STATIC mp_obj_t gen_instance_send(mp_obj_t self_in, mp_obj_t send_value) {
    return gen_resume_and_raise(self_in, send_value, MP_OBJ_NULL, true);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(gen_instance_send_obj, gen_instance_send);
#if MICROPY_PY_ASYNC_AWAIT
STATIC mp_obj_t coro_instance_await(mp_obj_t self_in) {
    return self_in;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(coro_instance_await_obj, coro_instance_await);
#endif
STATIC mp_obj_t gen_instance_throw(size_t n_args, const mp_obj_t *args) {
    // The signature of this function is: throw(type[, value[, traceback]])
    // CPython will pass all given arguments through the call chain and process them
    // at the point they are used (native generators will handle them differently to
    // user-defined generators with a throw() method). To save passing multiple
    // values, MicroPython instead does partial processing here to reduce it down to
    // one argument and passes that through:
    // - if only args[1] is given, or args[2] is given but is None, args[1] is
    //   passed through (in the standard case it is an exception class or instance)
    // - if args[2] is given and not None it is passed through (in the standard
    //   case it would be an exception instance and args[1] its corresponding class)
    // - args[3] is always ignored
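    // For example, gen.throw(ValueError) forwards the ValueError class, while
    // gen.throw(ValueError, ValueError("msg")) forwards the instance in args[2].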
    mp_obj_t exc = args[1];
    if (n_args > 2 && args[2] != mp_const_none) {
        exc = args[2];
    }
    return gen_resume_and_raise(args[0], mp_const_none, exc, true);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gen_instance_throw_obj, 2, 4, gen_instance_throw);
static mp_obj_t generatorexit(void) {
    #if MICROPY_CPYTHON_EXCEPTION_CHAIN
    MP_STATIC_ASSERT(!MICROPY_CONST_GENERATOREXIT_OBJ);
    mp_obj_exception_initialize0(&mp_static_GeneratorExit_obj, &mp_type_GeneratorExit);
    #endif
    return MP_OBJ_FROM_PTR(&mp_const_GeneratorExit_obj);
}
STATIC mp_obj_t gen_instance_close(mp_obj_t self_in) {
    mp_obj_t ret;
    switch (mp_obj_gen_resume(self_in, mp_const_none, generatorexit(), &ret)) {
        case MP_VM_RETURN_YIELD:
            mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("generator ignored GeneratorExit"));

        // Swallow GeneratorExit (== successful close), and re-raise any other
        case MP_VM_RETURN_EXCEPTION:
            // ret should always be an instance of an exception class
            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
                return mp_const_none;
            }
            nlr_raise(ret);

        default:
            // The only choice left is MP_VM_RETURN_NORMAL which is successful close
            return mp_const_none;
    }
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(gen_instance_close_obj, gen_instance_close);
#if MICROPY_PY_GENERATOR_PEND_THROW
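// pend_throw(exc) stores exc in pend_exc so that it is raised inside the generator at
// its next resume (see the pend_exc handling in mp_obj_gen_resume above); it returns
// whatever exception was previously pending.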
STATIC mp_obj_t gen_instance_pend_throw(mp_obj_t self_in, mp_obj_t exc_in) {
    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
    if (self->pend_exc == MP_OBJ_NULL) {
        mp_raise_ValueError(MP_ERROR_TEXT("generator already executing"));
    }
    mp_obj_t prev = self->pend_exc;
    self->pend_exc = exc_in;
    return prev;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(gen_instance_pend_throw_obj, gen_instance_pend_throw);
#endif
STATIC const mp_rom_map_elem_t gen_instance_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&gen_instance_close_obj) },
    { MP_ROM_QSTR(MP_QSTR_send), MP_ROM_PTR(&gen_instance_send_obj) },
    { MP_ROM_QSTR(MP_QSTR_throw), MP_ROM_PTR(&gen_instance_throw_obj) },
    #if MICROPY_PY_GENERATOR_PEND_THROW
    { MP_ROM_QSTR(MP_QSTR_pend_throw), MP_ROM_PTR(&gen_instance_pend_throw_obj) },
    #endif
};
STATIC MP_DEFINE_CONST_DICT(gen_instance_locals_dict, gen_instance_locals_dict_table);
MP_DEFINE_CONST_OBJ_TYPE(
    mp_type_gen_instance,
    MP_QSTR_generator,
    MP_TYPE_FLAG_ITER_IS_ITERNEXT,
    print, gen_instance_print,
    unary_op, mp_generic_unary_op,
    iter, gen_instance_iternext,
    locals_dict, &gen_instance_locals_dict
    );
#if MICROPY_PY_ASYNC_AWAIT
// CIRCUITPY
// coroutine instance locals dict and type
// same as generator, but with the addition of __await__().
STATIC const mp_rom_map_elem_t coro_instance_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&gen_instance_close_obj) },
    { MP_ROM_QSTR(MP_QSTR_send), MP_ROM_PTR(&gen_instance_send_obj) },
    { MP_ROM_QSTR(MP_QSTR_throw), MP_ROM_PTR(&gen_instance_throw_obj) },
    #if MICROPY_PY_GENERATOR_PEND_THROW
    { MP_ROM_QSTR(MP_QSTR_pend_throw), MP_ROM_PTR(&gen_instance_pend_throw_obj) },
    #endif
    { MP_ROM_QSTR(MP_QSTR___await__), MP_ROM_PTR(&coro_instance_await_obj) },
};
STATIC MP_DEFINE_CONST_DICT(coro_instance_locals_dict, coro_instance_locals_dict_table);
MP_DEFINE_CONST_OBJ_TYPE(
    mp_type_coro_instance,
    MP_QSTR_coroutine,
    MP_TYPE_FLAG_ITER_IS_ITERNEXT,
    print, coro_instance_print,
    unary_op, mp_generic_unary_op,
    iter, gen_instance_iternext,
    locals_dict, &coro_instance_locals_dict
    );
#endif