/*
 * This file is part of the Micro Python project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 * Copyright (c) 2014 Paul Sokolovsky
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/emitglue.h"
#include "py/objtype.h"
#include "py/runtime.h"
#include "py/bc0.h"
#include "py/bc.h"

#if 0
#define TRACE(ip) printf("sp=" INT_FMT " ", sp - code_state->sp); mp_bytecode_print2(ip, 1);
#else
#define TRACE(ip)
#endif

// Value stack grows up (this makes it incompatible with native C stack, but
// makes sure that arguments to functions are in natural order arg1..argN
// (Python semantics mandates left-to-right evaluation order, including for
// function arguments). Stack pointer is pre-incremented and points at the
// top element.
// Exception stack also grows up, top element is also pointed at.

// Exception stack unwind reasons (WHY_* in CPython-speak)
// TODO perhaps compress this to RETURN=0, JUMP>0, with number of unwinds
// left to do encoded in the JUMP number
typedef enum {
    UNWIND_RETURN = 1,
    UNWIND_JUMP,
} mp_unwind_reason_t;

#define DECODE_UINT \
    mp_uint_t unum = 0; \
    do { \
        unum = (unum << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)
#define DECODE_ULABEL mp_uint_t ulab = (ip[0] | (ip[1] << 8)); ip += 2
#define DECODE_SLABEL mp_uint_t slab = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2
#define DECODE_QSTR qstr qst = 0; \
    do { \
        qst = (qst << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)
#define DECODE_PTR \
    ip = (byte*)(((mp_uint_t)ip + sizeof(mp_uint_t) - 1) & (~(sizeof(mp_uint_t) - 1))); /* align ip */ \
    void *ptr = (void*)*(mp_uint_t*)ip; \
    ip += sizeof(mp_uint_t)
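
// Operand encodings used by the DECODE_* macros above:
//  - unsigned ints and qstrs are variable-length: 7 bits per byte, most
//    significant group first, with the 0x80 bit of each byte acting as a
//    continuation flag; e.g. bytes 0x81 0x05 decode to (1 << 7) | 5 = 133.
//  - jump labels are fixed 2-byte little-endian values; signed labels are
//    biased by 0x8000, so bytes 0x05 0x80 give 0x8005 - 0x8000 = +5 and
//    bytes 0xfb 0x7f give 0x7ffb - 0x8000 = -5.
//  - pointers are aligned to sizeof(mp_uint_t) and read natively.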

#define PUSH(val) *++sp = (val)
#define POP() (*sp--)
#define TOP() (*sp)
#define SET_TOP(val) *sp = (val)
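
// Value stack convention in the macros above: PUSH pre-increments sp, so after
// PUSH(a); PUSH(b); the stack holds ... a b with sp pointing at b (TOP() == b,
// sp[-1] == a), and POP() returns b while moving sp back down to a.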

#if MICROPY_PY_SYS_EXC_INFO
#define CLEAR_SYS_EXC_INFO() MP_STATE_VM(cur_exception) = MP_OBJ_NULL;
#else
#define CLEAR_SYS_EXC_INFO()
#endif

#define PUSH_EXC_BLOCK(with_or_finally) do { \
    DECODE_ULABEL; /* except labels are always forward */ \
    ++exc_sp; \
    exc_sp->handler = ip + ulab; \
    exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1) | currently_in_except_block); \
    exc_sp->prev_exc = MP_OBJ_NULL; \
    currently_in_except_block = 0; /* in a try block now */ \
} while (0)

#define POP_EXC_BLOCK() \
    currently_in_except_block = MP_TAGPTR_TAG0(exc_sp->val_sp); /* restore previous state */ \
    exc_sp--; /* pop back to previous exception handler */ \
    CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(), not compliant, but it shouldn't be used in 1st place */
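
// exc_sp->val_sp is a tagged pointer: the pointer part is the value-stack
// position to restore, tag bit 0 saves whether we were already inside an
// except block, and tag bit 1 marks a finally/with block (PUSH_EXC_BLOCK is
// called with 1 for SETUP_WITH/SETUP_FINALLY and 0 for SETUP_EXCEPT; the
// unwind code tests it via MP_TAGPTR_TAG1).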

// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
// sp points to bottom of stack which grows up
// returns:
//  MP_VM_RETURN_NORMAL, sp valid, return value in *sp
//  MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
//  MP_VM_RETURN_EXCEPTION, exception in fastn[0]
mp_vm_return_kind_t mp_execute_bytecode(mp_code_state *code_state, volatile mp_obj_t inject_exc) {
#define SELECTIVE_EXC_IP (0)
#if SELECTIVE_EXC_IP
#define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; } /* stores ip 1 byte past last opcode */
#define MARK_EXC_IP_GLOBAL()
#else
#define MARK_EXC_IP_SELECTIVE()
#define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; } /* stores ip pointing to last opcode */
#endif
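
// With SELECTIVE_EXC_IP enabled, only opcodes that can raise record the current
// ip back into code_state (via MARK_EXC_IP_SELECTIVE); with it disabled, the ip
// is recorded on every dispatch instead (via MARK_EXC_IP_GLOBAL).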

#if MICROPY_OPT_COMPUTED_GOTO
    #include "py/vmentrytable.h"
    #define DISPATCH() do { \
        TRACE(ip); \
        MARK_EXC_IP_GLOBAL(); \
        goto *entry_table[*ip++]; \
    } while (0)
    #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
    #define ENTRY(op) entry_##op
    #define ENTRY_DEFAULT entry_default
#else
    #define DISPATCH() break
    #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
    #define ENTRY(op) case op
    #define ENTRY_DEFAULT default
#endif
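
// With MICROPY_OPT_COMPUTED_GOTO, each handler ends by jumping straight to the
// next opcode's label through entry_table (a computed goto); otherwise the
// handlers are cases of a switch statement and DISPATCH() breaks out of the
// switch so the enclosing loop re-enters it.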

    // nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
    // sees that it's possible for us to jump from the dispatch loop to the exception
    // handler.  Without this, the code may have a different stack layout in the dispatch
    // loop and the exception handler, leading to very obscure bugs.
    #define RAISE(o) do { nlr_pop(); nlr.ret_val = o; goto exception_handler; } while (0)

#if MICROPY_STACKLESS
run_code_state: ;
#endif
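
// With MICROPY_STACKLESS enabled, calls to other bytecode functions re-enter
// here with a freshly prepared code_state instead of recursing on the C stack
// (see the goto run_code_state in the MP_BC_CALL_* handlers below).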
    // Pointers which are constant for particular invocation of mp_execute_bytecode()
    mp_obj_t * /*const*/ fastn = &code_state->state[code_state->n_state - 1];
    mp_exc_stack_t * /*const*/ exc_stack = (mp_exc_stack_t*)(code_state->state + code_state->n_state);

    // variables that are visible to the exception handler (declared volatile)
    volatile bool currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
    mp_exc_stack_t *volatile exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack

    // outer exception handling loop
    for (;;) {
        nlr_buf_t nlr;
outer_dispatch_loop:
        if (nlr_push(&nlr) == 0) {
            // local variables that are not visible to the exception handler
            const byte *ip = code_state->ip;
            mp_obj_t *sp = code_state->sp;
            mp_obj_t obj_shared;

            // If we have an exception to inject, raise it now that we have
            // finished setting up the execution context.  This works as if a
            // RAISE_VARARGS bytecode was executed.
            // Injecting an exc into a "yield from" generator is a special case,
            // handled by MP_BC_YIELD_FROM itself
            if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
                mp_obj_t exc = inject_exc;
                inject_exc = MP_OBJ_NULL;
                exc = mp_make_raise_obj(exc);
                RAISE(exc);
            }

            // loop to execute byte code
            for (;;) {
dispatch_loop:
#if MICROPY_OPT_COMPUTED_GOTO
                DISPATCH();
#else
                TRACE(ip);
                MARK_EXC_IP_GLOBAL();
                switch (*ip++) {
#endif

                ENTRY(MP_BC_LOAD_CONST_FALSE):
                    PUSH(mp_const_false);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_NONE):
                    PUSH(mp_const_none);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_TRUE):
                    PUSH(mp_const_true);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
                    mp_int_t num = 0;
                    if ((ip[0] & 0x40) != 0) {
                        // Number is negative
                        num--;
                    }
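                    // The operand is a signed variable-length int: 7 bits per byte,
                    // most significant group first, with the sign taken from bit 0x40
                    // of the first byte (num starts at -1 when it is set).  E.g. a
                    // single byte 0x05 decodes to 5, and a single byte 0x7f to -1.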
                    do {
                        num = (num << 7) | (*ip & 0x7f);
                    } while ((*ip++ & 0x80) != 0);
                    PUSH(MP_OBJ_NEW_SMALL_INT(num));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_CONST_BYTES): {
                    DECODE_QSTR;
                    PUSH(mp_load_const_bytes(qst));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_CONST_STRING): {
                    DECODE_QSTR;
                    PUSH(mp_load_const_str(qst));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_CONST_OBJ): {
                    DECODE_PTR;
                    PUSH(ptr);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_NULL):
                    PUSH(MP_OBJ_NULL);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_FAST_N): {
                    DECODE_UINT;
                    obj_shared = fastn[-unum];
                load_check:
                    if (obj_shared == MP_OBJ_NULL) {
                    local_name_error: {
                        MARK_EXC_IP_SELECTIVE();
                        mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, "local variable referenced before assignment");
                        RAISE(obj);
                    }
                    }
                    PUSH(obj_shared);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_DEREF): {
                    DECODE_UINT;
                    obj_shared = mp_obj_cell_get(fastn[-unum]);
                    goto load_check;
                }

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_LOAD_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    PUSH(mp_load_name(qst));
                    DISPATCH();
                }
#else
                ENTRY(MP_BC_LOAD_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                    mp_uint_t x = *ip;
                    if (x < MP_STATE_CTX(dict_locals)->map.alloc && MP_STATE_CTX(dict_locals)->map.table[x].key == key) {
                        PUSH(MP_STATE_CTX(dict_locals)->map.table[x].value);
                    } else {
                        mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_locals)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
                        if (elem != NULL) {
                            *(byte*)ip = (elem - &MP_STATE_CTX(dict_locals)->map.table[0]) & 0xff;
                            PUSH(elem->value);
                        } else {
                            PUSH(mp_load_name(MP_OBJ_QSTR_VALUE(key)));
                        }
                    }
                    ip++;
                    DISPATCH();
                }
#endif
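
                // In the cached variant above (and in LOAD_GLOBAL/LOAD_ATTR/STORE_ATTR
                // below), the byte following the qstr operand in the bytecode is used
                // as a one-byte inline cache: on a successful map lookup it is
                // rewritten to that entry's index, so the next execution of this
                // instruction can hit the entry directly and skip mp_map_lookup.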

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_LOAD_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    PUSH(mp_load_global(qst));
                    DISPATCH();
                }
#else
                ENTRY(MP_BC_LOAD_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                    mp_uint_t x = *ip;
                    if (x < MP_STATE_CTX(dict_globals)->map.alloc && MP_STATE_CTX(dict_globals)->map.table[x].key == key) {
                        PUSH(MP_STATE_CTX(dict_globals)->map.table[x].value);
                    } else {
                        mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_globals)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
                        if (elem != NULL) {
                            *(byte*)ip = (elem - &MP_STATE_CTX(dict_globals)->map.table[0]) & 0xff;
                            PUSH(elem->value);
                        } else {
                            PUSH(mp_load_global(MP_OBJ_QSTR_VALUE(key)));
                        }
                    }
                    ip++;
                    DISPATCH();
                }
#endif

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_LOAD_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    SET_TOP(mp_load_attr(TOP(), qst));
                    DISPATCH();
                }
#else
                ENTRY(MP_BC_LOAD_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t top = TOP();
                    if (mp_obj_get_type(top)->attr == mp_obj_instance_attr) {
                        mp_obj_instance_t *self = top;
                        mp_uint_t x = *ip;
                        mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                        mp_map_elem_t *elem;
                        if (x < self->members.alloc && self->members.table[x].key == key) {
                            elem = &self->members.table[x];
                        } else {
                            elem = mp_map_lookup(&self->members, key, MP_MAP_LOOKUP);
                            if (elem != NULL) {
                                *(byte*)ip = elem - &self->members.table[0];
                            } else {
                                goto load_attr_cache_fail;
                            }
                        }
                        SET_TOP(elem->value);
                        ip++;
                        DISPATCH();
                    }
                load_attr_cache_fail:
                    SET_TOP(mp_load_attr(top, qst));
                    ip++;
                    DISPATCH();
                }
#endif

                ENTRY(MP_BC_LOAD_METHOD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_load_method(*sp, qst, sp);
                    sp += 1;
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_BUILD_CLASS):
                    MARK_EXC_IP_SELECTIVE();
                    PUSH(mp_load_build_class());
                    DISPATCH();

                ENTRY(MP_BC_LOAD_SUBSCR): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t index = POP();
                    SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_FAST_N): {
                    DECODE_UINT;
                    fastn[-unum] = POP();
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_DEREF): {
                    DECODE_UINT;
                    mp_obj_cell_set(fastn[-unum], POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_name(qst, POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_global(qst, POP());
                    DISPATCH();
                }

#if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
                ENTRY(MP_BC_STORE_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_attr(sp[0], qst, sp[-1]);
                    sp -= 2;
                    DISPATCH();
                }
#else
                // This caching code works with MICROPY_PY_BUILTINS_PROPERTY and/or
                // MICROPY_PY_DESCRIPTORS enabled because if the attr exists in
                // self->members then it can't be a property or have descriptors. A
                // consequence of this is that we can't use MP_MAP_LOOKUP_ADD_IF_NOT_FOUND
                // in the fast-path below, because that store could override a property.
                ENTRY(MP_BC_STORE_ATTR): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t top = TOP();
                    if (mp_obj_get_type(top)->attr == mp_obj_instance_attr && sp[-1] != MP_OBJ_NULL) {
                        mp_obj_instance_t *self = top;
                        mp_uint_t x = *ip;
                        mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
                        mp_map_elem_t *elem;
                        if (x < self->members.alloc && self->members.table[x].key == key) {
                            elem = &self->members.table[x];
                        } else {
                            elem = mp_map_lookup(&self->members, key, MP_MAP_LOOKUP);
                            if (elem != NULL) {
                                *(byte*)ip = elem - &self->members.table[0];
                            } else {
                                goto store_attr_cache_fail;
                            }
                        }
                        elem->value = sp[-1];
                        sp -= 2;
                        ip++;
                        DISPATCH();
                    }
                store_attr_cache_fail:
                    mp_store_attr(sp[0], qst, sp[-1]);
                    sp -= 2;
                    ip++;
                    DISPATCH();
                }
#endif

                ENTRY(MP_BC_STORE_SUBSCR):
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_subscr(sp[-1], sp[0], sp[-2]);
                    sp -= 3;
                    DISPATCH();

                ENTRY(MP_BC_DELETE_FAST): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (fastn[-unum] == MP_OBJ_NULL) {
                        goto local_name_error;
                    }
                    fastn[-unum] = MP_OBJ_NULL;
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_DEREF): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
                        goto local_name_error;
                    }
                    mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_delete_name(qst);
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_delete_global(qst);
                    DISPATCH();
                }

                ENTRY(MP_BC_DUP_TOP): {
                    mp_obj_t top = TOP();
                    PUSH(top);
                    DISPATCH();
                }

                ENTRY(MP_BC_DUP_TOP_TWO):
                    sp += 2;
                    sp[0] = sp[-2];
                    sp[-1] = sp[-3];
                    DISPATCH();

                ENTRY(MP_BC_POP_TOP):
                    sp -= 1;
                    DISPATCH();

                ENTRY(MP_BC_ROT_TWO): {
                    mp_obj_t top = sp[0];
                    sp[0] = sp[-1];
                    sp[-1] = top;
                    DISPATCH();
                }

                ENTRY(MP_BC_ROT_THREE): {
                    mp_obj_t top = sp[0];
                    sp[0] = sp[-1];
                    sp[-1] = sp[-2];
                    sp[-2] = top;
                    DISPATCH();
                }

                ENTRY(MP_BC_JUMP): {
                    DECODE_SLABEL;
                    ip += slab;
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(POP())) {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
                    DECODE_SLABEL;
                    if (!mp_obj_is_true(POP())) {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(TOP())) {
                        ip += slab;
                    } else {
                        sp--;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(TOP())) {
                        sp--;
                    } else {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_SETUP_WITH): {
                    MARK_EXC_IP_SELECTIVE();
                    // stack: (..., ctx_mgr)
                    mp_obj_t obj = TOP();
                    mp_load_method(obj, MP_QSTR___exit__, sp);
                    mp_load_method(obj, MP_QSTR___enter__, sp + 2);
                    mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 2);
                    sp += 1;
                    PUSH_EXC_BLOCK(1);
                    PUSH(ret);
                    // stack: (..., __exit__, ctx_mgr, as_value)
                    DISPATCH();
                }

                ENTRY(MP_BC_WITH_CLEANUP): {
                    MARK_EXC_IP_SELECTIVE();
                    // Arriving here, there's "exception control block" on top of stack,
                    // and __exit__ method (with self) underneath it. Bytecode calls __exit__,
                    // and "deletes" it off stack, shifting "exception control block"
                    // to its place.
                    if (TOP() == mp_const_none) {
                        // stack: (..., __exit__, ctx_mgr, None)
                        sp[1] = mp_const_none;
                        sp[2] = mp_const_none;
                        sp -= 2;
                        mp_call_method_n_kw(3, 0, sp);
                        SET_TOP(mp_const_none);
                    } else if (MP_OBJ_IS_SMALL_INT(TOP())) {
                        mp_int_t cause_val = MP_OBJ_SMALL_INT_VALUE(TOP());
                        if (cause_val == UNWIND_RETURN) {
                            // stack: (..., __exit__, ctx_mgr, ret_val, UNWIND_RETURN)
                            mp_obj_t ret_val = sp[-1];
                            sp[-1] = mp_const_none;
                            sp[0] = mp_const_none;
                            sp[1] = mp_const_none;
                            mp_call_method_n_kw(3, 0, sp - 3);
                            sp[-3] = ret_val;
                            sp[-2] = MP_OBJ_NEW_SMALL_INT(UNWIND_RETURN);
                        } else {
                            assert(cause_val == UNWIND_JUMP);
                            // stack: (..., __exit__, ctx_mgr, dest_ip, num_exc, UNWIND_JUMP)
                            mp_obj_t dest_ip = sp[-2];
                            mp_obj_t num_exc = sp[-1];
                            sp[-2] = mp_const_none;
                            sp[-1] = mp_const_none;
                            sp[0] = mp_const_none;
                            mp_call_method_n_kw(3, 0, sp - 4);
                            sp[-4] = dest_ip;
                            sp[-3] = num_exc;
                            sp[-2] = MP_OBJ_NEW_SMALL_INT(UNWIND_JUMP);
                        }
                        sp -= 2; // we removed (__exit__, ctx_mgr)
                    } else {
                        assert(mp_obj_is_exception_type(TOP()));
                        // stack: (..., __exit__, ctx_mgr, traceback, exc_val, exc_type)
                        // Need to pass (sp[0], sp[-1], sp[-2]) as arguments so must reverse the
                        // order of these on the value stack (don't want to create a temporary
                        // array because it increases stack footprint of the VM).
                        mp_obj_t obj = sp[-2];
                        sp[-2] = sp[0];
                        sp[0] = obj;
                        mp_obj_t ret_value = mp_call_method_n_kw(3, 0, sp - 4);
                        if (mp_obj_is_true(ret_value)) {
                            // We need to silence/swallow the exception.  This is done
                            // by popping the exception and the __exit__ handler and
                            // replacing it with None, which signals END_FINALLY to just
                            // execute the finally handler normally.
                            sp -= 4;
                            SET_TOP(mp_const_none);
                            assert(exc_sp >= exc_stack);
                            POP_EXC_BLOCK();
                        } else {
                            // We need to re-raise the exception.  We pop __exit__ handler
                            // and copy the 3 exception values down (remembering that they
                            // are reversed due to above code).
                            sp[-4] = sp[0];
                            sp[-3] = sp[-1];
                            sp -= 2;
                        }
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_UNWIND_JUMP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_SLABEL;
                    PUSH((void*)(ip + slab)); // push destination ip for jump
                    PUSH((void*)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
unwind_jump:;
                    mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
                    while ((unum & 0x7f) > 0) {
                        unum -= 1;
                        assert(exc_sp >= exc_stack);
                        if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
                            // We're going to run "finally" code as a coroutine
                            // (not calling it recursively). Set up a sentinel
                            // on a stack so it can return back to us when it is
                            // done (when END_FINALLY reached).
                            PUSH((void*)unum); // push number of exception handlers left to unwind
                            PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_JUMP)); // push sentinel
                            ip = exc_sp->handler; // get exception handler byte code address
                            exc_sp--; // pop exception handler
                            goto dispatch_loop; // run the exception handler
                        }
                        exc_sp--;
                    }
                    ip = (const byte*)POP(); // pop destination ip for jump
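                    // the 0x80 "also pop the value stack" flag is not counted down by
                    // the loop above (only the low 7 bits are), so if it was set then
                    // unum is still non-zero here and one extra value is dropped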
                    if (unum != 0) {
                        sp--;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                // matched against: POP_BLOCK or POP_EXCEPT (anything else?)
                ENTRY(MP_BC_SETUP_EXCEPT):
                ENTRY(MP_BC_SETUP_FINALLY): {
                    MARK_EXC_IP_SELECTIVE();
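                    // whether this is a finally block is decided by re-reading the opcode
                    // through code_state->ip: with SELECTIVE_EXC_IP the stored ip points
                    // one byte past the opcode (hence ip[-1] below), otherwise it points at it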
                    #if SELECTIVE_EXC_IP
                    PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
                    #else
                    PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
                    #endif
                    DISPATCH();
                }

                ENTRY(MP_BC_END_FINALLY):
                    MARK_EXC_IP_SELECTIVE();
                    // not fully implemented
                    // if TOS is an exception, reraises the exception (3 values on TOS)
                    // if TOS is None, just pops it and continues
                    // if TOS is an integer, does something else
                    // else error
                    if (mp_obj_is_exception_type(TOP())) {
                        RAISE(sp[-1]);
                    }
                    if (TOP() == mp_const_none) {
                        sp--;
                    } else {
                        assert(MP_OBJ_IS_SMALL_INT(TOP()));
                        // We finished "finally" coroutine and now dispatch back
                        // to our caller, based on TOS value
                        mp_unwind_reason_t reason = MP_OBJ_SMALL_INT_VALUE(POP());
                        if (reason == UNWIND_RETURN) {
                            goto unwind_return;
                        } else {
                            assert(reason == UNWIND_JUMP);
                            goto unwind_jump;
                        }
                    }
                    DISPATCH();

                ENTRY(MP_BC_GET_ITER):
                    MARK_EXC_IP_SELECTIVE();
                    SET_TOP(mp_getiter(TOP()));
                    DISPATCH();

                ENTRY(MP_BC_FOR_ITER): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
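                    // publish the current stack pointer via code_state->sp so code outside
                    // this dispatch loop (e.g. the exception handler dealing with
                    // StopIteration) sees the stack top while the iterator's __next__ runs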
                    code_state->sp = sp;
                    assert(TOP());
                    mp_obj_t value = mp_iternext_allow_raise(TOP());
                    if (value == MP_OBJ_STOP_ITERATION) {
                        --sp; // pop the exhausted iterator
                        ip += ulab; // jump to after for-block
                    } else {
                        PUSH(value); // push the next iteration value
                    }
                    DISPATCH();
                }

                // matched against: SETUP_EXCEPT, SETUP_FINALLY, SETUP_WITH
                ENTRY(MP_BC_POP_BLOCK):
                    // we are exiting an exception handler, so pop the last one of the exception-stack
                    assert(exc_sp >= exc_stack);
                    POP_EXC_BLOCK();
                    DISPATCH();

                // matched against: SETUP_EXCEPT
                ENTRY(MP_BC_POP_EXCEPT):
                    // TODO need to work out how blocks work etc
                    // pops block, checks it's an exception block, and restores the stack, saving the 3 exception values to local threadstate
                    assert(exc_sp >= exc_stack);
                    assert(currently_in_except_block);
                    //sp = (mp_obj_t*)(*exc_sp--);
                    //exc_sp--; // discard ip
                    POP_EXC_BLOCK();
                    //sp -= 3; // pop 3 exception values
                    DISPATCH();

                ENTRY(MP_BC_NOT):
                    if (TOP() == mp_const_true) {
                        SET_TOP(mp_const_false);
                    } else {
                        SET_TOP(mp_const_true);
                    }
                    DISPATCH();

                ENTRY(MP_BC_BUILD_TUPLE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_tuple(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_BUILD_LIST): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_list(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_LIST_APPEND): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // I think it's guaranteed by the compiler that sp[-unum] is a list
                    mp_obj_list_append(sp[-unum], sp[0]);
                    sp--;
                    DISPATCH();
                }

                ENTRY(MP_BC_BUILD_MAP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    PUSH(mp_obj_new_dict(unum));
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_MAP):
                    MARK_EXC_IP_SELECTIVE();
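                    // stack: (..., map, value, key) with key on top; the map is left on
                    // the stack afterwards (layout inferred from the indices used below)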
                    sp -= 2;
                    mp_obj_dict_store(sp[0], sp[2], sp[1]);
                    DISPATCH();

                ENTRY(MP_BC_MAP_ADD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // I think it's guaranteed by the compiler that sp[-unum - 1] is a map
                    mp_obj_dict_store(sp[-unum - 1], sp[0], sp[-1]);
                    sp -= 2;
                    DISPATCH();
                }

#if MICROPY_PY_BUILTINS_SET
                ENTRY(MP_BC_BUILD_SET): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_set(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_SET_ADD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // I think it's guaranteed by the compiler that sp[-unum] is a set
                    mp_obj_set_store(sp[-unum], sp[0]);
                    sp--;
                    DISPATCH();
                }
#endif

#if MICROPY_PY_BUILTINS_SLICE
                ENTRY(MP_BC_BUILD_SLICE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (unum == 2) {
                        mp_obj_t stop = POP();
                        mp_obj_t start = TOP();
                        SET_TOP(mp_obj_new_slice(start, stop, mp_const_none));
                    } else {
                        mp_obj_t step = POP();
                        mp_obj_t stop = POP();
                        mp_obj_t start = TOP();
                        SET_TOP(mp_obj_new_slice(start, stop, step));
                    }
                    DISPATCH();
                }
#endif

                ENTRY(MP_BC_UNPACK_SEQUENCE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_unpack_sequence(sp[0], unum, sp);
                    sp += unum - 1;
                    DISPATCH();
                }

                ENTRY(MP_BC_UNPACK_EX): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_unpack_ex(sp[0], unum, sp);
                    sp += (unum & 0xff) + ((unum >> 8) & 0xff);
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_FUNCTION): {
                    DECODE_PTR;
                    PUSH(mp_make_function_from_raw_code(ptr, MP_OBJ_NULL, MP_OBJ_NULL));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
                    DECODE_PTR;
                    // Stack layout: def_tuple def_dict <- TOS
                    mp_obj_t def_dict = POP();
                    SET_TOP(mp_make_function_from_raw_code(ptr, TOP(), def_dict));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_CLOSURE): {
                    DECODE_PTR;
                    mp_uint_t n_closed_over = *ip++;
                    // Stack layout: closed_overs <- TOS
                    sp -= n_closed_over - 1;
                    SET_TOP(mp_make_closure_from_raw_code(ptr, n_closed_over, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
                    DECODE_PTR;
                    mp_uint_t n_closed_over = *ip++;
                    // Stack layout: def_tuple def_dict closed_overs <- TOS
                    sp -= 2 + n_closed_over - 1;
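                    // the 0x100 flag passed below presumably tells mp_make_closure_from_raw_code
                    // that the default-args tuple/dict precede the closed-over cells at sp
                    // (assumption about that helper's convention)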
                    SET_TOP(mp_make_closure_from_raw_code(ptr, 0x100 | n_closed_over, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_FUNCTION): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
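                    // each keyword argument occupies two stack slots (name, value), so
                    // (unum >> 7) & 0x1fe below equals 2 * n_keyword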
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
                    #if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
                        mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1);
                        if (new_state) {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                        #if MICROPY_STACKLESS_STRICT
                        else {
                        deep_recursion_error:
                            mp_exc_recursion_depth();
                        }
                        #endif
                    }
                    #endif
                    SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    // We have the following stack layout here:
                    // fun arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
                    #if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);

                        mp_call_args_t out_args;
                        mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args);

                        mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
                            out_args.n_args, out_args.n_kw, out_args.args);
                        m_del(mp_obj_t, out_args.args, out_args.n_alloc);
                        if (new_state) {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                        #if MICROPY_STACKLESS_STRICT
                        else {
                            goto deep_recursion_error;
                        }
                        #endif
                    }
                    #endif
                    SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
                    DISPATCH();
                }
|
2014-04-14 11:22:44 -04:00
|
|
|
|
2014-12-02 14:25:10 -05:00
|
|
|
ENTRY(MP_BC_CALL_METHOD): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 11:22:44 -04:00
|
|
|
DECODE_UINT;
|
|
|
|
// unum & 0xff == n_positional
|
|
|
|
// (unum >> 8) & 0xff == n_keyword
|
|
|
|
sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
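// Same arithmetic as CALL_FUNCTION plus 1 extra slot: the stack holds
// fun self arg0 ..., so sp must drop one more slot to reach the function.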
|
2015-03-27 19:14:44 -04:00
|
|
|
#if MICROPY_STACKLESS
|
|
|
|
if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
|
|
|
|
code_state->ip = ip;
|
|
|
|
code_state->sp = sp;
|
|
|
|
code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
|
|
|
|
|
|
|
|
mp_uint_t n_args = unum & 0xff;
|
|
|
|
mp_uint_t n_kw = (unum >> 8) & 0xff;
|
|
|
|
int adjust = (sp[1] == NULL) ? 0 : 1;
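// LOAD_METHOD pushes either (method, self) or (plain callable, NULL).
// When the self slot is NULL there is no implicit first argument, so it
// is skipped; otherwise self is passed as the first positional argument.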
|
|
|
|
|
|
|
|
mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(*sp, n_args + adjust, n_kw, sp + 2 - adjust);
|
|
|
|
if (new_state) {
|
|
|
|
new_state->prev = code_state;
|
|
|
|
code_state = new_state;
|
|
|
|
nlr_pop();
|
|
|
|
goto run_code_state;
|
|
|
|
}
|
2015-03-27 19:14:45 -04:00
|
|
|
#if MICROPY_STACKLESS_STRICT
|
|
|
|
else {
|
|
|
|
goto deep_recursion_error;
|
|
|
|
}
|
|
|
|
#endif
|
2015-03-27 19:14:44 -04:00
|
|
|
}
|
|
|
|
#endif
|
2014-04-14 11:22:44 -04:00
|
|
|
SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 14:25:10 -05:00
|
|
|
}
|
2014-04-14 11:22:44 -04:00
|
|
|
|
2014-12-02 14:25:10 -05:00
|
|
|
ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 11:22:44 -04:00
|
|
|
DECODE_UINT;
|
|
|
|
// unum & 0xff == n_positional
|
|
|
|
// (unum >> 8) & 0xff == n_keyword
|
|
|
|
// We have the following stack layout here:
|
|
|
|
// fun self arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
|
|
|
|
sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 3;
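// 3 extra slots here: the self/NULL slot from LOAD_METHOD plus the *args
// sequence and **kwargs dict on top of the regular arguments.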
|
2015-03-27 19:14:45 -04:00
|
|
|
#if MICROPY_STACKLESS
|
|
|
|
if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
|
|
|
|
code_state->ip = ip;
|
|
|
|
code_state->sp = sp;
|
|
|
|
code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
|
|
|
|
|
2015-04-01 18:31:30 -04:00
|
|
|
mp_call_args_t out_args;
|
2015-03-27 19:14:45 -04:00
|
|
|
mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args);
|
|
|
|
|
|
|
|
mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
|
|
|
|
out_args.n_args, out_args.n_kw, out_args.args);
|
|
|
|
m_del(mp_obj_t, out_args.args, out_args.n_alloc);
|
|
|
|
if (new_state) {
|
|
|
|
new_state->prev = code_state;
|
|
|
|
code_state = new_state;
|
|
|
|
nlr_pop();
|
|
|
|
goto run_code_state;
|
|
|
|
}
|
2015-03-27 19:14:45 -04:00
|
|
|
#if MICROPY_STACKLESS_STRICT
|
|
|
|
else {
|
|
|
|
goto deep_recursion_error;
|
|
|
|
}
|
|
|
|
#endif
|
2015-03-27 19:14:45 -04:00
|
|
|
}
|
|
|
|
#endif
|
2014-04-14 11:22:44 -04:00
|
|
|
SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 14:25:10 -05:00
|
|
|
}
|
2014-04-14 11:22:44 -04:00
|
|
|
|
|
|
|
ENTRY(MP_BC_RETURN_VALUE):
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-01-31 17:55:05 -05:00
|
|
|
unwind_return:
|
2014-04-14 11:22:44 -04:00
|
|
|
while (exc_sp >= exc_stack) {
|
2014-12-22 07:49:57 -05:00
|
|
|
if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
|
2014-04-14 11:22:44 -04:00
|
|
|
// We're going to run "finally" code as a coroutine
|
|
|
|
// (not calling it recursively). Set up a sentinel
|
|
|
|
// on the stack so it can return back to us when it is
|
|
|
|
// done (when END_FINALLY is reached).
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_RETURN));
|
|
|
|
ip = exc_sp->handler;
|
|
|
|
// We don't need to do anything with sp, finally is just
|
|
|
|
// syntactic sugar for sequential execution.
|
|
|
|
// sp =
|
2014-01-31 17:55:05 -05:00
|
|
|
exc_sp--;
|
2014-04-14 11:22:44 -04:00
|
|
|
goto dispatch_loop;
|
2014-01-31 17:55:05 -05:00
|
|
|
}
|
2014-04-14 11:22:44 -04:00
|
|
|
exc_sp--;
|
|
|
|
}
|
|
|
|
nlr_pop();
|
2014-05-31 09:50:46 -04:00
|
|
|
code_state->sp = sp;
|
2014-04-14 11:22:44 -04:00
|
|
|
assert(exc_sp == exc_stack - 1);
|
2015-03-27 19:14:44 -04:00
|
|
|
#if MICROPY_STACKLESS
|
|
|
|
if (code_state->prev != NULL) {
|
|
|
|
mp_obj_t res = *sp;
|
|
|
|
mp_globals_set(code_state->old_globals);
|
|
|
|
code_state = code_state->prev;
|
|
|
|
*code_state->sp = res;
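// Stackless return: drop back to the caller's frame, store the return
// value at the caller's stack top (where its callee object used to be)
// and resume executing the caller via run_code_state.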
|
|
|
|
goto run_code_state;
|
|
|
|
}
|
|
|
|
#endif
|
2014-04-14 11:22:44 -04:00
|
|
|
return MP_VM_RETURN_NORMAL;
|
|
|
|
|
2014-05-25 17:58:04 -04:00
|
|
|
ENTRY(MP_BC_RAISE_VARARGS): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-12-02 14:25:10 -05:00
|
|
|
mp_uint_t unum = *ip++;
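// unum is the number of arguments to raise: 0 means a bare "raise"
// (re-raise the exception currently being handled), 1 means raise the
// object popped from the stack.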
|
2014-05-25 17:58:04 -04:00
|
|
|
mp_obj_t obj;
|
2014-04-14 11:22:44 -04:00
|
|
|
assert(unum <= 1);
|
|
|
|
if (unum == 0) {
|
|
|
|
// search for the innermost previous exception, to re-raise it
|
2014-05-25 17:58:04 -04:00
|
|
|
obj = MP_OBJ_NULL;
|
2014-04-14 11:22:44 -04:00
|
|
|
for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; e--) {
|
|
|
|
if (e->prev_exc != MP_OBJ_NULL) {
|
2014-05-25 17:58:04 -04:00
|
|
|
obj = e->prev_exc;
|
2014-04-14 11:22:44 -04:00
|
|
|
break;
|
2014-03-29 13:44:15 -04:00
|
|
|
}
|
2014-03-26 08:42:17 -04:00
|
|
|
}
|
2014-05-25 17:58:04 -04:00
|
|
|
if (obj == MP_OBJ_NULL) {
|
|
|
|
obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, "No active exception to reraise");
|
|
|
|
RAISE(obj);
|
2014-04-14 11:22:44 -04:00
|
|
|
}
|
|
|
|
} else {
|
2014-05-25 17:58:04 -04:00
|
|
|
obj = POP();
|
2014-04-14 11:22:44 -04:00
|
|
|
}
|
2014-05-25 17:58:04 -04:00
|
|
|
obj = mp_make_raise_obj(obj);
|
|
|
|
RAISE(obj);
|
|
|
|
}
|
2014-01-10 09:09:55 -05:00
|
|
|
|
2014-04-14 11:22:44 -04:00
|
|
|
ENTRY(MP_BC_YIELD_VALUE):
|
2014-03-26 11:36:12 -04:00
|
|
|
yield:
|
2014-04-14 11:22:44 -04:00
|
|
|
nlr_pop();
|
2014-05-31 09:50:46 -04:00
|
|
|
code_state->ip = ip;
|
|
|
|
code_state->sp = sp;
|
|
|
|
code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
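// Freeze this generator's frame: ip/sp are saved so the VM can be
// re-entered at this point when the generator is resumed, and
// MP_TAGPTR_MAKE packs the currently_in_except_block flag into the low
// bit(s) of the saved exc_sp pointer.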
|
2014-04-14 11:22:44 -04:00
|
|
|
return MP_VM_RETURN_YIELD;
|
2013-10-15 17:25:17 -04:00
|
|
|
|
2014-04-14 11:22:44 -04:00
|
|
|
ENTRY(MP_BC_YIELD_FROM): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-03-26 11:36:12 -04:00
|
|
|
//#define EXC_MATCH(exc, type) MP_OBJ_IS_TYPE(exc, type)
|
|
|
|
#define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type)
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 11:50:23 -04:00
|
|
|
#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, &mp_type_GeneratorExit)) { RAISE(t); }
|
2014-04-14 11:22:44 -04:00
|
|
|
mp_vm_return_kind_t ret_kind;
|
2014-05-25 17:58:04 -04:00
|
|
|
mp_obj_t send_value = POP();
|
2014-04-14 11:22:44 -04:00
|
|
|
mp_obj_t t_exc = MP_OBJ_NULL;
|
2014-05-25 17:58:04 -04:00
|
|
|
mp_obj_t ret_value;
|
2014-04-14 11:22:44 -04:00
|
|
|
if (inject_exc != MP_OBJ_NULL) {
|
|
|
|
t_exc = inject_exc;
|
|
|
|
inject_exc = MP_OBJ_NULL;
|
2014-05-25 17:58:04 -04:00
|
|
|
ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
|
2014-04-14 11:22:44 -04:00
|
|
|
} else {
|
2014-05-25 17:58:04 -04:00
|
|
|
ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
|
2014-04-14 11:22:44 -04:00
|
|
|
}
|
2014-03-26 11:36:12 -04:00
|
|
|
|
2014-04-14 11:22:44 -04:00
|
|
|
if (ret_kind == MP_VM_RETURN_YIELD) {
|
|
|
|
ip--;
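// Step ip back one byte so that this YIELD_FROM opcode executes again on
// the next resume, continuing to delegate to the sub-generator.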
|
2014-05-25 17:58:04 -04:00
|
|
|
PUSH(ret_value);
|
2014-04-14 11:22:44 -04:00
|
|
|
goto yield;
|
|
|
|
}
|
|
|
|
if (ret_kind == MP_VM_RETURN_NORMAL) {
|
|
|
|
// Pop exhausted gen
|
|
|
|
sp--;
|
2014-05-25 17:58:04 -04:00
|
|
|
if (ret_value == MP_OBJ_NULL) {
|
2014-04-14 11:22:44 -04:00
|
|
|
// Optimize StopIteration
|
|
|
|
// TODO: get StopIteration's value
|
|
|
|
PUSH(mp_const_none);
|
|
|
|
} else {
|
2014-05-25 17:58:04 -04:00
|
|
|
PUSH(ret_value);
|
2014-03-26 11:36:12 -04:00
|
|
|
}
|
|
|
|
|
2014-04-14 11:22:44 -04:00
|
|
|
// If we injected GeneratorExit downstream, then even
|
|
|
|
// if it was swallowed, we re-raise GeneratorExit
|
|
|
|
GENERATOR_EXIT_IF_NEEDED(t_exc);
|
|
|
|
DISPATCH();
|
|
|
|
}
|
|
|
|
if (ret_kind == MP_VM_RETURN_EXCEPTION) {
|
|
|
|
// Pop exhausted gen
|
|
|
|
sp--;
|
2014-05-25 17:58:04 -04:00
|
|
|
if (EXC_MATCH(ret_value, &mp_type_StopIteration)) {
|
|
|
|
PUSH(mp_obj_exception_get_value(ret_value));
|
2014-03-26 13:24:03 -04:00
|
|
|
// If we injected GeneratorExit downstream, then even
|
|
|
|
// if it was swallowed, we re-raise GeneratorExit
|
|
|
|
GENERATOR_EXIT_IF_NEEDED(t_exc);
|
2014-04-14 11:22:44 -04:00
|
|
|
DISPATCH();
|
|
|
|
} else {
|
2014-05-25 17:58:04 -04:00
|
|
|
RAISE(ret_value);
|
2014-03-26 11:36:12 -04:00
|
|
|
}
|
|
|
|
}
|
2014-04-14 11:22:44 -04:00
|
|
|
}
|
2014-03-26 11:36:12 -04:00
|
|
|
|
2014-05-25 17:58:04 -04:00
|
|
|
ENTRY(MP_BC_IMPORT_NAME): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 11:22:44 -04:00
|
|
|
DECODE_QSTR;
|
2014-05-25 17:58:04 -04:00
|
|
|
mp_obj_t obj = POP();
|
|
|
|
SET_TOP(mp_import_name(qst, obj, TOP()));
|
2014-04-14 11:22:44 -04:00
|
|
|
DISPATCH();
|
2014-05-25 17:58:04 -04:00
|
|
|
}
|
2014-04-14 11:22:44 -04:00
|
|
|
|
2014-05-25 17:58:04 -04:00
|
|
|
ENTRY(MP_BC_IMPORT_FROM): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 11:22:44 -04:00
|
|
|
DECODE_QSTR;
|
2014-05-25 17:58:04 -04:00
|
|
|
mp_obj_t obj = mp_import_from(TOP(), qst);
|
|
|
|
PUSH(obj);
|
2014-04-14 11:22:44 -04:00
|
|
|
DISPATCH();
|
2014-05-25 17:58:04 -04:00
|
|
|
}
|
2014-04-14 11:22:44 -04:00
|
|
|
|
|
|
|
ENTRY(MP_BC_IMPORT_STAR):
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 11:22:44 -04:00
|
|
|
mp_import_all(POP());
|
|
|
|
DISPATCH();
|
|
|
|
|
py: Compress load-int, load-fast, store-fast, unop, binop bytecodes.
There is a lot of potential in compressing bytecodes and making more use of the
coding space. This patch introduces "multi" bytecodes which have their
argument included in the bytecode (by addition).
UNARY_OP and BINARY_OP now no longer take a 1 byte argument for the
opcode. Rather, the opcode is included in the first byte itself.
LOAD_FAST_[0,1,2] and STORE_FAST_[0,1,2] are removed in favour of their
multi versions, which can take an argument between 0 and 15 inclusive.
The majority of LOAD_FAST/STORE_FAST codes fit in this range and so this
saves a byte for each of these.
LOAD_CONST_SMALL_INT_MULTI is used to load small ints between -16 and 47
inclusive. Such ints are quite common and now only need 1 byte to
store, and now have much faster decoding.
In all, this patch saves about 2% RAM for typical bytecode (1.8% on
64-bit test, 2.5% on pyboard test). It also reduces the binary size
(because bytecodes are simplified) and doesn't harm performance.
2014-10-25 11:43:46 -04:00
|
|
|
#if MICROPY_OPT_COMPUTED_GOTO
|
|
|
|
ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
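// The opcode byte itself encodes the constant: values -16..47 map onto 64
// consecutive opcodes.  E.g. the byte MP_BC_LOAD_CONST_SMALL_INT_MULTI + 16
// loads the small int 0.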
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_LOAD_FAST_MULTI):
|
|
|
|
obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
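// fastn points at the highest state slot and locals are indexed downwards,
// so the index MP_BC_LOAD_FAST_MULTI - opcode is zero or negative:
// local 0 is fastn[0], local 1 is fastn[-1], and so on.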
|
|
|
|
goto load_check;
|
|
|
|
|
|
|
|
ENTRY(MP_BC_STORE_FAST_MULTI):
|
|
|
|
fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_UNARY_OP_MULTI):
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-10-25 11:43:46 -04:00
|
|
|
SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_BINARY_OP_MULTI): {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-10-25 11:43:46 -04:00
|
|
|
mp_obj_t rhs = POP();
|
|
|
|
mp_obj_t lhs = TOP();
|
|
|
|
SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
|
|
|
|
DISPATCH();
|
|
|
|
}
|
|
|
|
|
|
|
|
ENTRY_DEFAULT:
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-10-25 11:43:46 -04:00
|
|
|
#else
|
|
|
|
ENTRY_DEFAULT:
|
|
|
|
if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
|
|
|
|
DISPATCH();
|
|
|
|
} else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
|
|
|
|
obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
|
|
|
|
goto load_check;
|
|
|
|
} else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
|
|
|
|
fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
|
|
|
|
DISPATCH();
|
|
|
|
} else if (ip[-1] < MP_BC_UNARY_OP_MULTI + 5) {
|
|
|
|
SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
|
|
|
|
DISPATCH();
|
|
|
|
} else if (ip[-1] < MP_BC_BINARY_OP_MULTI + 35) {
|
|
|
|
mp_obj_t rhs = POP();
|
|
|
|
mp_obj_t lhs = TOP();
|
|
|
|
SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
|
|
|
|
DISPATCH();
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
2014-05-25 17:58:04 -04:00
|
|
|
mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, "byte code not implemented");
|
2014-04-14 11:22:44 -04:00
|
|
|
nlr_pop();
|
2014-05-25 17:58:04 -04:00
|
|
|
fastn[0] = obj;
|
2014-04-14 11:22:44 -04:00
|
|
|
return MP_VM_RETURN_EXCEPTION;
|
2014-05-25 17:58:04 -04:00
|
|
|
}
|
2014-04-17 11:50:23 -04:00
|
|
|
|
2014-05-21 15:32:59 -04:00
|
|
|
#if !MICROPY_OPT_COMPUTED_GOTO
|
2014-04-15 03:57:01 -04:00
|
|
|
} // switch
|
2014-04-14 11:22:44 -04:00
|
|
|
#endif
|
2014-10-25 13:19:55 -04:00
|
|
|
|
|
|
|
pending_exception_check:
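// Between opcodes, check for an exception scheduled from outside the VM
// (e.g. a keyboard interrupt set from an interrupt handler) and raise it
// here as if it had occurred at the current instruction.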
|
2015-01-01 18:30:53 -05:00
|
|
|
if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
|
2014-12-28 00:17:43 -05:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2015-01-01 18:30:53 -05:00
|
|
|
mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
|
|
|
|
MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
|
2014-10-25 13:19:55 -04:00
|
|
|
RAISE(obj);
|
|
|
|
}
|
|
|
|
|
2014-04-17 11:50:23 -04:00
|
|
|
} // for loop
|
2013-10-15 17:25:17 -04:00
|
|
|
|
|
|
|
} else {
|
2014-04-17 11:50:23 -04:00
|
|
|
exception_handler:
|
2013-10-15 17:25:17 -04:00
|
|
|
// exception occurred
|
|
|
|
|
2015-04-24 20:17:41 -04:00
|
|
|
#if MICROPY_PY_SYS_EXC_INFO
|
|
|
|
MP_STATE_VM(cur_exception) = nlr.ret_val;
|
|
|
|
#endif
|
|
|
|
|
2014-12-28 19:29:59 -05:00
|
|
|
#if SELECTIVE_EXC_IP
|
|
|
|
// with selective ip, we store the ip 1 byte past the opcode, so move ptr back
|
|
|
|
code_state->ip -= 1;
|
|
|
|
#endif
|
|
|
|
|
2014-03-26 14:37:06 -04:00
|
|
|
// check if it's a StopIteration within a for block
|
py, vm: Replace save_ip, save_sp with code_state->{ip, sp}.
This may seem a bit of a risky change, in that it may introduce crazy
bugs with respect to volatile variables in the VM loop. But, I think it
should be fine: code_state points to some external memory, so the
compiler should always read/write to that memory when accessing the
ip/sp variables (ie not put them in registers).
Anyway, it passes all tests and improves on all efficiency fronts: about
2-4% faster (64-bit unix), 16 bytes less stack space per call (64-bit
unix) and slightly less executable size (unix and stmhal).
The reason it's more efficient is save_ip and save_sp were volatile
variables, so were anyway stored on the stack (in memory, not regs).
Thus converting them to code_state->{ip, sp} doesn't cost an extra
memory dereference (except maybe to get code_state, but that can be put
in a register and then made more efficient for other uses of it).
2014-06-01 07:32:28 -04:00
|
|
|
if (*code_state->ip == MP_BC_FOR_ITER && mp_obj_is_subclass_fast(mp_obj_get_type(nlr.ret_val), &mp_type_StopIteration)) {
|
|
|
|
const byte *ip = code_state->ip + 1;
|
2014-03-26 14:37:06 -04:00
|
|
|
DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
|
2014-12-02 14:25:10 -05:00
|
|
|
code_state->ip = ip + ulab; // jump to after for-block
|
2014-06-01 07:32:28 -04:00
|
|
|
code_state->sp -= 1; // pop the exhausted iterator
|
2014-03-26 14:37:06 -04:00
|
|
|
goto outer_dispatch_loop; // continue with dispatch loop
|
|
|
|
}
|
|
|
|
|
2015-03-27 19:14:44 -04:00
|
|
|
#if MICROPY_STACKLESS
|
|
|
|
unwind_loop:
|
|
|
|
#endif
|
2014-01-18 18:24:36 -05:00
|
|
|
// set file and line number that the exception occurred at
|
2014-01-30 06:49:18 -05:00
|
|
|
// TODO: don't set traceback for exceptions re-raised by END_FINALLY.
|
|
|
|
// But consider how to handle nested exceptions.
|
2014-04-04 06:52:59 -04:00
|
|
|
// TODO need a better way of not adding traceback to constant objects (right now, just GeneratorExit_obj and MemoryError_obj)
|
|
|
|
if (mp_obj_is_exception_instance(nlr.ret_val) && nlr.ret_val != &mp_const_GeneratorExit_obj && nlr.ret_val != &mp_const_MemoryError_obj) {
|
2014-09-04 09:44:01 -04:00
|
|
|
const byte *ip = code_state->code_info;
|
|
|
|
mp_uint_t code_info_size = mp_decode_uint(&ip);
|
|
|
|
qstr block_name = mp_decode_uint(&ip);
|
|
|
|
qstr source_file = mp_decode_uint(&ip);
|
|
|
|
mp_uint_t bc = code_state->ip - code_state->code_info - code_info_size;
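// bc is the offset of the current instruction from the start of the
// bytecode proper (the code_info header is skipped); it is used below to
// walk the line-number table and recover the source line.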
|
2014-08-26 18:35:57 -04:00
|
|
|
mp_uint_t source_line = 1;
|
|
|
|
mp_uint_t c;
|
2014-09-04 09:44:01 -04:00
|
|
|
while ((c = *ip)) {
|
2014-08-26 18:35:57 -04:00
|
|
|
mp_uint_t b, l;
|
|
|
|
if ((c & 0x80) == 0) {
|
|
|
|
// 0b0LLBBBBB encoding
|
|
|
|
b = c & 0x1f;
|
|
|
|
l = c >> 5;
|
2014-09-04 09:44:01 -04:00
|
|
|
ip += 1;
|
2014-08-26 18:35:57 -04:00
|
|
|
} else {
|
|
|
|
// 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
|
|
|
|
b = c & 0xf;
|
2014-09-04 09:44:01 -04:00
|
|
|
l = ((c << 4) & 0x700) | ip[1];
|
|
|
|
ip += 2;
|
2014-08-26 18:35:57 -04:00
|
|
|
}
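// Worked example: a single-byte entry 0x45 (0b01000101) means the bytecode
// offset advances by 5 and the source line by 2.  Two-byte entries allow
// larger line deltas, with b in the low nibble and l spread over 11 bits.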
|
|
|
|
if (bc >= b) {
|
|
|
|
bc -= b;
|
|
|
|
source_line += l;
|
|
|
|
} else {
|
|
|
|
// found source line corresponding to bytecode offset
|
|
|
|
break;
|
2014-06-02 11:24:34 -04:00
|
|
|
}
|
2014-01-18 18:24:36 -05:00
|
|
|
}
|
2014-01-19 07:38:49 -05:00
|
|
|
mp_obj_exception_add_traceback(nlr.ret_val, source_file, source_line, block_name);
|
2014-01-18 18:24:36 -05:00
|
|
|
}
|
|
|
|
|
2013-12-29 11:54:59 -05:00
|
|
|
while (currently_in_except_block) {
|
|
|
|
// nested exception
|
|
|
|
|
2014-03-22 07:49:31 -04:00
|
|
|
assert(exc_sp >= exc_stack);
|
2013-12-29 11:54:59 -05:00
|
|
|
|
|
|
|
// TODO make a proper message for nested exception
|
|
|
|
// at the moment we are just raising the very last exception (the one that caused the nested exception)
|
|
|
|
|
|
|
|
// move up to previous exception handler
|
2014-03-29 17:16:27 -04:00
|
|
|
POP_EXC_BLOCK();
|
2013-12-29 11:54:59 -05:00
|
|
|
}
|
|
|
|
|
2014-03-22 07:49:31 -04:00
|
|
|
if (exc_sp >= exc_stack) {
|
2013-12-29 11:54:59 -05:00
|
|
|
// set flag to indicate that we are now handling an exception
|
|
|
|
currently_in_except_block = 1;
|
|
|
|
|
2013-10-15 17:25:17 -04:00
|
|
|
// catch exception and pass to byte code
|
2014-05-31 09:50:46 -04:00
|
|
|
code_state->ip = exc_sp->handler;
|
2014-04-17 11:50:23 -04:00
|
|
|
mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
|
2014-03-29 20:54:48 -04:00
|
|
|
// save this exception in the stack so it can be used in a reraise, if needed
|
|
|
|
exc_sp->prev_exc = nlr.ret_val;
|
2013-10-15 18:46:01 -04:00
|
|
|
// push(traceback, exc-val, exc-type)
|
2013-12-21 13:17:45 -05:00
|
|
|
PUSH(mp_const_none);
|
2013-10-15 18:46:01 -04:00
|
|
|
PUSH(nlr.ret_val);
|
2014-03-28 20:52:17 -04:00
|
|
|
PUSH(mp_obj_get_type(nlr.ret_val));
|
2014-05-31 09:50:46 -04:00
|
|
|
code_state->sp = sp;
|
2013-12-29 11:54:59 -05:00
|
|
|
|
2015-03-27 19:14:44 -04:00
|
|
|
#if MICROPY_STACKLESS
|
|
|
|
} else if (code_state->prev != NULL) {
|
|
|
|
mp_globals_set(code_state->old_globals);
|
|
|
|
code_state = code_state->prev;
|
|
|
|
fastn = &code_state->state[code_state->n_state - 1];
|
|
|
|
exc_stack = (mp_exc_stack_t*)(code_state->state + code_state->n_state);
|
|
|
|
// variables that are visible to the exception handler (declared volatile)
|
|
|
|
currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
|
|
|
|
exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack
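// Restore the caller's frame and keep unwinding there: the caller's
// currently_in_except_block flag is recovered from the tag bit packed
// into its saved exc_sp pointer.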
|
|
|
|
goto unwind_loop;
|
|
|
|
|
|
|
|
#endif
|
2013-10-15 17:25:17 -04:00
|
|
|
} else {
|
2014-02-15 17:55:00 -05:00
|
|
|
// propagate exception to higher level
|
|
|
|
// TODO what to do about ip and sp? they don't really make sense at this point
|
|
|
|
fastn[0] = nlr.ret_val; // must put exception here because sp is invalid
|
|
|
|
return MP_VM_RETURN_EXCEPTION;
|
2013-10-15 17:25:17 -04:00
|
|
|
}
|
2013-10-04 14:53:11 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|