circuitpython/py/mpstate.h

/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MICROPY_INCLUDED_PY_MPSTATE_H
#define MICROPY_INCLUDED_PY_MPSTATE_H

#include <stdint.h>

#include "py/mpconfig.h"
#include "py/mpthread.h"
#include "py/misc.h"
#include "py/nlr.h"
#include "py/obj.h"
#include "py/objlist.h"
#include "py/objexcept.h"

// This file contains structures defining the state of the MicroPython
// memory system, runtime and virtual machine. The state is a global
// variable, but in the future it is hoped that the state can become local.

// This structure contains dynamic configuration for the compiler.
#if MICROPY_DYNAMIC_COMPILER
typedef struct mp_dynamic_compiler_t {
    uint8_t small_int_bits; // must be <= host small_int_bits
    bool py_builtins_str_unicode;
    uint8_t native_arch;
    uint8_t nlr_buf_num_regs;
} mp_dynamic_compiler_t;
extern mp_dynamic_compiler_t mp_dynamic_compiler;
#endif
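
// When MICROPY_DYNAMIC_COMPILER is enabled these settings are consulted at
// runtime rather than fixed at compile time; an illustrative check (a
// sketch, not code from this file):
//
//     if (mp_dynamic_compiler.native_arch != MP_NATIVE_ARCH_NONE) {
//         // the configured target can accept native code for this arch
//     }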

// These are the values for sched_state
#define MP_SCHED_IDLE (1)
#define MP_SCHED_LOCKED (-1)
#define MP_SCHED_PENDING (0) // 0 so it's a quick check in the VM

typedef struct _mp_sched_item_t {
    mp_obj_t func;
    mp_obj_t arg;
} mp_sched_item_t;
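
// Items enter the scheduler queue via mp_sched_schedule() (declared in
// py/runtime.h); a sketch of typical use from an interrupt handler, where
// my_callback_obj and arg_obj are hypothetical names:
//
//     // queue my_callback_obj(arg_obj) to run at the next safe point
//     mp_sched_schedule(my_callback_obj, arg_obj);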

// This structure holds information about the memory allocation system.
typedef struct _mp_state_mem_t {
    #if MICROPY_MEM_STATS
    size_t total_bytes_allocated;
    size_t current_bytes_allocated;
    size_t peak_bytes_allocated;
    #endif

    byte *gc_alloc_table_start;
    size_t gc_alloc_table_byte_len;
    #if MICROPY_ENABLE_FINALISER
    byte *gc_finaliser_table_start;
    #endif
    byte *gc_pool_start;
    byte *gc_pool_end;

    // Lowest address of the long lived section of the heap; long lived
    // allocations are packed downward from the top of the heap.
    void *gc_lowest_long_lived_ptr;

    int gc_stack_overflow;
    MICROPY_GC_STACK_ENTRY_TYPE gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];

    // This variable controls auto garbage collection. If set to false then the
    // GC won't automatically run when gc_alloc can't find enough blocks. But
    // you can still allocate/free memory and also explicitly call gc_collect.
    bool gc_auto_collect_enabled;

    #if MICROPY_GC_ALLOC_THRESHOLD
    // Bytes allocated since the last collection, and the threshold above
    // which a collection is triggered (configured via gc.threshold()).
    size_t gc_alloc_amount;
    size_t gc_alloc_threshold;
    #endif

    size_t gc_first_free_atb_index[MICROPY_ATB_INDICES];
    size_t gc_last_free_atb_index;

    #if MICROPY_PY_GC_COLLECT_RETVAL
    size_t gc_collected;
    #endif

    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
    // This is a global mutex used to make the GC thread-safe.
    mp_thread_mutex_t gc_mutex;
    #endif

    void **permanent_pointers;
} mp_state_mem_t;
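
// Code in py/gc.c reaches this state through the MP_STATE_MEM() macro
// defined at the bottom of this file; an illustrative expression (not code
// from this file):
//
//     size_t pool_bytes = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start);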

// This structure holds runtime and VM information. It includes a section
// which contains root pointers that must be scanned by the GC.
typedef struct _mp_state_vm_t {
    //
    // CONTINUE ROOT POINTER SECTION
    // This must start at the beginning of this structure and continues
    // the root pointer section from the mp_state_thread_t structure.
    //

    qstr_pool_t *last_pool;

    #if MICROPY_TRACKED_ALLOC
    struct _m_tracked_node_t *m_tracked_head;
    #endif

    // non-heap memory for creating a traceback if we can't allocate RAM
    mp_obj_traceback_t mp_emergency_traceback_obj;

    // non-heap memory for creating an exception if we can't allocate RAM
    mp_obj_exception_t mp_emergency_exception_obj;

    // memory for exception arguments if we can't allocate RAM
    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
    #if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
    // statically allocated buf (needs to be aligned to mp_obj_t)
    mp_obj_t mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE / sizeof(mp_obj_t)];
    #else
    // dynamically allocated buf
    byte *mp_emergency_exception_buf;
    #endif
    #endif
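
    // A port chooses between the two layouts above in its mpconfigport.h; a
    // minimal sketch (the 256-byte size is an arbitrary example value):
    //
    //     #define MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF (1)
    //     #define MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE (256)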

    #if MICROPY_KBD_EXCEPTION
    // exception object of type KeyboardInterrupt
    mp_obj_exception_t mp_kbd_exception;
    #endif

    // exception object of type ReloadException
    mp_obj_exception_t mp_reload_exception;

    // dictionary with loaded modules (may be exposed as sys.modules)
    mp_obj_dict_t mp_loaded_modules_dict;

    #if MICROPY_ENABLE_SCHEDULER
    mp_sched_item_t sched_queue[MICROPY_SCHEDULER_DEPTH];
    #endif

    // current exception being handled, for sys.exc_info()
    #if MICROPY_PY_SYS_EXC_INFO
    mp_obj_base_t *cur_exception;
    #endif

    #if MICROPY_PY_SYS_ATEXIT
    // exposed through sys.atexit function
    mp_obj_t sys_exitfunc;
    #endif

    // dictionary for the __main__ module
    mp_obj_dict_t dict_main;

    #if MICROPY_PY_SYS
    // If MICROPY_PY_SYS_PATH_ARGV_DEFAULTS is not enabled then these two lists
    // must be initialised after the call to mp_init.
    mp_obj_list_t mp_sys_path_obj;
    mp_obj_list_t mp_sys_argv_obj;
    #endif

    // dictionary for overridden builtins
    #if MICROPY_CAN_OVERRIDE_BUILTINS
    mp_obj_dict_t *mp_module_builtins_override_dict;
    #endif

    #if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
    // An mp_obj_list_t that tracks relocated native code so the GC does not reclaim it.
    mp_obj_t track_reloc_code_list;
    #endif

    // include any root pointers defined by a port
    MICROPY_PORT_ROOT_POINTERS

    // root pointers for extmod
    #if MICROPY_REPL_EVENT_DRIVEN
    vstr_t *repl_line;
    #endif

    #if MICROPY_VFS
    struct _mp_vfs_mount_t *vfs_cur;
    struct _mp_vfs_mount_t *vfs_mount_table;
    #endif

    //
    // END ROOT POINTER SECTION
    ////////////////////////////////////////////////////////////

    // pointer and sizes to store interned string data
    // (qstr_last_chunk can be a root pointer but is also stored in the qstr pool)
    char *qstr_last_chunk;
    size_t qstr_last_alloc;
    size_t qstr_last_used;

    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
    // This is a global mutex used to make qstr interning thread-safe.
    mp_thread_mutex_t qstr_mutex;
    #endif

    #if MICROPY_ENABLE_COMPILER
    mp_uint_t mp_optimise_value;
    #if MICROPY_EMIT_NATIVE
    uint8_t default_emit_opt; // one of MP_EMIT_OPT_xxx
    #endif
    #endif

    // size of the emergency exception buf, if it's dynamically allocated
    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0
    mp_int_t mp_emergency_exception_buf_size;
    #endif

    #if MICROPY_ENABLE_SCHEDULER
    volatile int16_t sched_state;
    uint8_t sched_len;
    uint8_t sched_idx;
    #endif

    #if MICROPY_PY_THREAD_GIL
    // This is a global mutex used to make the VM/runtime thread-safe.
    mp_thread_mutex_t gil_mutex;
    #endif

    #if MICROPY_OPT_MAP_LOOKUP_CACHE
    // See mp_map_lookup.
    uint8_t map_lookup_cache[MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE];
    #endif
} mp_state_vm_t;

// This structure holds state that is specific to a given thread.
// Everything in this structure is scanned for root pointers.
typedef struct _mp_state_thread_t {
    // Stack top at the start of the program.
    char *stack_top;

    #if MICROPY_MAX_STACK_USAGE
    char *stack_bottom;
    #endif

    #if MICROPY_STACK_CHECK
    size_t stack_limit;
    #endif

    #if MICROPY_ENABLE_PYSTACK
    uint8_t *pystack_start;
    uint8_t *pystack_end;
    uint8_t *pystack_cur;
    #endif
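
    // When MICROPY_ENABLE_PYSTACK is enabled, a port supplies the backing
    // buffer at startup via mp_pystack_init() from py/pystack.h; a minimal
    // sketch (the buffer name and size are illustrative only):
    //
    //     static mp_obj_t pystack[1024];
    //     mp_pystack_init(pystack, &pystack[MP_ARRAY_SIZE(pystack)]);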

    // Locking of the GC is done per thread.
    uint16_t gc_lock_depth;

    ////////////////////////////////////////////////////////////
    // START ROOT POINTER SECTION
    // Everything that needs GC scanning must start here, and
    // is followed by state in the mp_state_vm_t structure.
    //

    mp_obj_dict_t *dict_locals;
    mp_obj_dict_t *dict_globals;

    nlr_buf_t *nlr_top;

    // pending exception object (MP_OBJ_NULL if not pending)
    volatile mp_obj_t mp_pending_exception;

    // If MP_OBJ_STOP_ITERATION is propagated then this holds its argument.
    mp_obj_t stop_iteration_arg;

    #if MICROPY_PY_SYS_SETTRACE
    mp_obj_t prof_trace_callback;
    bool prof_callback_is_executing;
    struct _mp_code_state_t *current_code_state;
    #endif
} mp_state_thread_t;

// This structure combines the above 3 structures.
// The order of the entries is important for root pointer scanning in the GC to work.
typedef struct _mp_state_ctx_t {
    mp_state_thread_t thread;
    mp_state_vm_t vm;
    mp_state_mem_t mem;
} mp_state_ctx_t;

extern mp_state_ctx_t mp_state_ctx;

#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
#define MP_STATE_MAIN_THREAD(x) (mp_state_ctx.thread.x)

#if MICROPY_PY_THREAD
extern mp_state_thread_t *mp_thread_get_state(void);
#define MP_STATE_THREAD(x) (mp_thread_get_state()->x)
#else
#define MP_STATE_THREAD(x) MP_STATE_MAIN_THREAD(x)
#endif
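
// These macros are how core code reaches global state; an illustrative use
// (not code from this file):
//
//     mp_obj_dict_t *main_dict = &MP_STATE_VM(dict_main);
//     nlr_buf_t *top = MP_STATE_THREAD(nlr_top);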
#endif // MICROPY_INCLUDED_PY_MPSTATE_H