// x86 specific stuff
#include "py/mpconfig.h"
#include "py/runtime0.h"
#if MICROPY_EMIT_X86
// This is defined so that the assembler exports generic assembler API macros
#define GENERIC_ASM_API (1)
#include "py/asmx86.h"
// Word indices of REG_LOCAL_x in nlr_buf_t
#define NLR_BUF_IDX_LOCAL_1 (5) // ebx
#define NLR_BUF_IDX_LOCAL_2 (7) // esi
#define NLR_BUF_IDX_LOCAL_3 (6) // edi
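// Note: these indices assume the register save order used by nlr_push in
// py/nlrx86.c, where words 0-1 of nlr_buf_t are the prev/ret_val fields and
// the saved machine state follows (eip, ebp, esp, ebx, edi, esi), placing
// ebx, edi and esi at words 5, 6 and 7.  If that save order ever changes,
// the values above must be updated to match.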
// py/emitnative: Optimise and improve exception handling in native code.
//
// Previously, native code used a full nlr_buf_t for each exception handler
// (try-except, try-finally, with).  For nested exception handlers this used a
// lot of C stack and was rather inefficient.
//
// Exceptions are instead handled by setting up only a single nlr_buf_t context
// for the entire function, and managing a state machine (using the PC) to work
// out which exception handler to run when an exception is raised by an
// nlr_jump.  This keeps C stack usage at a constant level regardless of the
// depth of Python exception blocks.
//
// This also fixes a bug where, if a local variable was written to within an
// exception handler, its value was incorrectly restored when an exception was
// raised (because the nlr_jump restores register values to their state at the
// point of the nlr_push), and it gets nested try-finally+with working with the
// viper emitter.
//
// Broadly speaking, the efficiency of executing native code that doesn't use
// any exception blocks is unchanged, and emitted code size is only slightly
// increased for such functions.  C stack usage of all native functions is
// equal to or less than before.  Emitted code size for functions that do use
// exception blocks is increased by roughly 10% (due in part to the bug fixes
// mentioned above).  Most importantly, this scheme allows more Python features
// to be implemented in native code, such as unwind jumps and yielding from
// within nested exception blocks.
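// The scheme above can be pictured with the following rough C sketch.  It is
// an illustration only: example_native_fun and active_handler are hypothetical
// names, and the real emitter implements the dispatch with jumps in generated
// machine code rather than with C control flow.
#if 0 // illustration only, not compiled
static mp_obj_t example_native_fun(void) {
    // A single nlr_buf_t for the whole function, however deeply the Python
    // exception blocks are nested, keeping C stack usage constant.
    nlr_buf_t nlr;
    // State recording which handler (if any) is currently active; volatile so
    // its latest value survives the register restore done by nlr_jump.
    volatile unsigned int active_handler = 0;
    if (nlr_push(&nlr) != 0) {
        // An exception reached this function's single nlr context.
        if (active_handler == 0) {
            nlr_jump(nlr.ret_val); // no handler active: propagate to the caller
        }
        // Otherwise branch to the handler selected by active_handler, with the
        // exception object available in nlr.ret_val.
    }
    // ... the function body runs here, updating active_handler as each
    // try/except, try/finally and with block is entered and left ...
    nlr_pop();
    return mp_const_none;
}
#endif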
// x86 passes arguments on the stack, so the emitter needs this table of how many args each runtime helper function takes
STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
[MP_F_CONVERT_OBJ_TO_NATIVE] = 2,
[MP_F_CONVERT_NATIVE_TO_OBJ] = 2,
// py: Fix native functions so they run with their correct globals context.
//
// A function compiled with the @micropython.native decorator previously did
// not work correctly when accessing global variables, because the globals dict
// was not being set upon function entry.  This is fixed by setting, on
// function entry, the current globals dict to the globals context the function
// was defined within, as per normal Python semantics and as bytecode does.
// Upon function exit the original globals dict is restored.
//
// In order to restore the globals dict when an exception is raised, the native
// function must guard its internals with an nlr_push/nlr_pop pair.  Because
// this push/pop is relatively expensive, in both C stack usage for the
// nlr_buf_t and CPU execution time, the implementation optimises things as
// much as possible.  First, the compiler keeps track of whether a function
// even needs to access global variables.  Using this information the native
// emitter generates three different kinds of code:
//  1. no globals used, no exception handlers: no nlr handling code and no
//     setting of the globals dict;
//  2. globals used, no exception handlers: an nlr_buf_t is allocated on the
//     C stack but it is not used if the globals dict is unchanged, saving
//     execution time because nlr_push/nlr_pop don't need to run;
//  3. function has exception handlers, may use globals: an nlr_buf_t is
//     allocated and nlr_push/nlr_pop are always called.
// In the end, native functions that don't access globals and don't have
// exception handlers run more efficiently than those that do.  A rough C
// sketch of the entry/exit pattern follows this table.
//
// Fixes issue #1573.
[MP_F_NATIVE_SWAP_GLOBALS] = 1,
[MP_F_LOAD_NAME] = 1,
[MP_F_LOAD_GLOBAL] = 1,
[MP_F_LOAD_BUILD_CLASS] = 0,
[MP_F_LOAD_ATTR] = 2,
[MP_F_LOAD_METHOD] = 3,
[MP_F_LOAD_SUPER_METHOD] = 2,
[MP_F_STORE_NAME] = 2,
[MP_F_STORE_GLOBAL] = 2,
[MP_F_STORE_ATTR] = 3,
[MP_F_OBJ_SUBSCR] = 3,
[MP_F_OBJ_IS_TRUE] = 1,
[MP_F_UNARY_OP] = 2,
[MP_F_BINARY_OP] = 3,
[MP_F_BUILD_TUPLE] = 2,
[MP_F_BUILD_LIST] = 2,
[MP_F_BUILD_MAP] = 1,
[MP_F_BUILD_SET] = 2,
[MP_F_STORE_SET] = 2,
[MP_F_LIST_APPEND] = 2,
[MP_F_STORE_MAP] = 3,
[MP_F_MAKE_FUNCTION_FROM_RAW_CODE] = 3,
[MP_F_NATIVE_CALL_FUNCTION_N_KW] = 3,
[MP_F_CALL_METHOD_N_KW] = 3,
[MP_F_CALL_METHOD_N_KW_VAR] = 3,
[MP_F_NATIVE_GETITER] = 2,
[MP_F_NATIVE_ITERNEXT] = 1,
[MP_F_NLR_PUSH] = 1,
[MP_F_NLR_POP] = 0,
[MP_F_NATIVE_RAISE] = 1,
[MP_F_IMPORT_NAME] = 3,
[MP_F_IMPORT_FROM] = 2,
[MP_F_IMPORT_ALL] = 1,
[MP_F_NEW_SLICE] = 3,
[MP_F_UNPACK_SEQUENCE] = 3,
[MP_F_UNPACK_EX] = 3,
[MP_F_DELETE_NAME] = 1,
[MP_F_DELETE_GLOBAL] = 1,
[MP_F_MAKE_CLOSURE_FROM_RAW_CODE] = 3,
[MP_F_ARG_CHECK_NUM_SIG] = 3,
[MP_F_SETUP_CODE_STATE] = 4,
[MP_F_SMALL_INT_FLOOR_DIVIDE] = 2,
[MP_F_SMALL_INT_MODULO] = 2,
[MP_F_NATIVE_YIELD_FROM] = 3,
[MP_F_SETJMP] = 1,
};
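// The entry/exit pattern described in the comment above MP_F_NATIVE_SWAP_GLOBALS
// can be pictured with the rough C sketch below.  It is an illustration only:
// example_native_fun_with_globals and my_globals are hypothetical names, the
// real emitter calls the MP_F_NATIVE_SWAP_GLOBALS helper (one argument, per the
// table above) rather than mp_globals_get/mp_globals_set directly, and the
// cheaper cases 1 and 2 from that comment are not shown.
#if 0 // illustration only, not compiled
static mp_obj_dict_t *my_globals; // hypothetical: the globals dict captured when the function was defined

static mp_obj_t example_native_fun_with_globals(void) {
    // On entry, swap in the globals context the function was defined within.
    mp_obj_dict_t *old_globals = mp_globals_get();
    mp_globals_set(my_globals);
    nlr_buf_t nlr;
    if (nlr_push(&nlr) != 0) {
        // An exception was raised: restore the caller's globals, then re-raise.
        mp_globals_set(old_globals);
        nlr_jump(nlr.ret_val);
    }
    mp_obj_t ret = mp_const_none; // ... the function body runs here ...
    nlr_pop();
    // On normal exit, restore the caller's globals as well.
    mp_globals_set(old_globals);
    return ret;
}
#endif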
#define N_X86 (1)
#define EXPORT_FUN(name) emit_native_x86_##name
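// Compiling the generic emitter with N_X86 set specialises it for this
// architecture, and EXPORT_FUN gives its entry points x86-specific names, so
// that, for example, EXPORT_FUN(new) becomes emit_native_x86_new.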
#include "py/emitnative.c"
#endif