py/gc: Implement GC running by allocation threshold.
Currently, MicroPython runs GC when it cannot allocate a block of memory, which happens when the heap is exhausted. However, that policy can't work well with "infinite" heaps, e.g. backed by virtual memory - there will be a lot of swap thrashing long before the VM is exhausted. Instead, in such cases an "allocation threshold" policy is used: a GC is run after some number of allocations have been made. Details vary; for example, the number or total amount of allocations can be used, the threshold may be self-adjusting based on GC outcome, etc. This change implements a simple variant of such a policy for MicroPython. The amount of memory allocated so far is used for the threshold, to make it useful for the typical finite-size (and small) heaps used with MicroPython ports. Such a GC policy is indeed useful for these types of heaps too, as it allows better control of fragmentation. For example, if the threshold is set to half the heap size, then for an application which usually makes a big number of small allocations, that will (try to) keep half of the heap memory in a nice defragmented state for an occasional large allocation. For an application which doesn't exhibit such behavior, there won't be any visible effects, except for GC running more frequently, which however may affect performance. To address this, the GC threshold is configurable, and by default is off so far. It's configured with a gc.threshold(amount_in_bytes) call (it can be queried by calling without an argument).
This commit is contained in:
parent
04c27e5eaa
commit
93e353e384
22
py/gc.c
22
py/gc.c
@ -152,6 +152,12 @@ void gc_init(void *start, void *end) {
|
|||||||
// allow auto collection
|
// allow auto collection
|
||||||
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
|
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
|
||||||
|
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
// by default, maxuint for gc threshold, effectively turning gc-by-threshold off
|
||||||
|
MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
|
||||||
|
MP_STATE_MEM(gc_alloc_amount) = 0;
|
||||||
|
#endif
|
||||||
|
|
||||||
#if MICROPY_PY_THREAD
|
#if MICROPY_PY_THREAD
|
||||||
mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
|
mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
|
||||||
#endif
|
#endif
|
||||||
@ -294,6 +300,9 @@ STATIC void gc_sweep(void) {
|
|||||||
void gc_collect_start(void) {
|
void gc_collect_start(void) {
|
||||||
GC_ENTER();
|
GC_ENTER();
|
||||||
MP_STATE_MEM(gc_lock_depth)++;
|
MP_STATE_MEM(gc_lock_depth)++;
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
MP_STATE_MEM(gc_alloc_amount) = 0;
|
||||||
|
#endif
|
||||||
MP_STATE_MEM(gc_stack_overflow) = 0;
|
MP_STATE_MEM(gc_stack_overflow) = 0;
|
||||||
MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
|
MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
|
||||||
// Trace root pointers. This relies on the root pointers being organised
|
// Trace root pointers. This relies on the root pointers being organised
|
||||||
@ -405,6 +414,15 @@ void *gc_alloc(size_t n_bytes, bool has_finaliser) {
|
|||||||
size_t start_block;
|
size_t start_block;
|
||||||
size_t n_free = 0;
|
size_t n_free = 0;
|
||||||
int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
|
int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
|
||||||
|
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
|
||||||
|
GC_EXIT();
|
||||||
|
gc_collect();
|
||||||
|
GC_ENTER();
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
|
|
||||||
// look for a run of n_blocks available blocks
|
// look for a run of n_blocks available blocks
|
||||||
@ -456,6 +474,10 @@ found:
|
|||||||
void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
|
void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
|
||||||
DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
|
DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
|
||||||
|
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
MP_STATE_MEM(gc_alloc_amount) += n_blocks;
|
||||||
|
#endif
|
||||||
|
|
||||||
GC_EXIT();
|
GC_EXIT();
|
||||||
|
|
||||||
// zero out the additional bytes of the newly allocated blocks
|
// zero out the additional bytes of the newly allocated blocks
|
||||||
|
22
py/modgc.c
22
py/modgc.c
@ -83,6 +83,25 @@ STATIC mp_obj_t gc_mem_alloc(void) {
|
|||||||
}
|
}
|
||||||
MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_alloc_obj, gc_mem_alloc);
|
MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_alloc_obj, gc_mem_alloc);
|
||||||
|
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
// gc.threshold([amount]): query or set the GC allocation threshold.
//
// With no arguments, returns the current threshold in bytes, or -1 if
// threshold-based GC is disabled.  With one argument: a negative value
// disables threshold-based GC; a non-negative value sets the threshold
// to that many bytes (rounded down to a whole number of GC blocks,
// since the threshold is stored internally in block units).
STATIC mp_obj_t gc_threshold(size_t n_args, const mp_obj_t *args) {
    if (n_args == 0) {
        // (size_t)-1 is the sentinel meaning "threshold GC disabled"
        if (MP_STATE_MEM(gc_alloc_threshold) == (size_t)-1) {
            return MP_OBJ_NEW_SMALL_INT(-1);
        }
        // Stored in GC blocks; convert back to bytes for the caller.
        return mp_obj_new_int(MP_STATE_MEM(gc_alloc_threshold) * MICROPY_BYTES_PER_GC_BLOCK);
    }
    mp_int_t val = mp_obj_get_int(args[0]);
    if (val < 0) {
        // Any negative value turns gc-by-threshold off.
        MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
    } else {
        MP_STATE_MEM(gc_alloc_threshold) = val / MICROPY_BYTES_PER_GC_BLOCK;
    }
    return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gc_threshold_obj, 0, 1, gc_threshold);
#endif
|
||||||
|
|
||||||
STATIC const mp_rom_map_elem_t mp_module_gc_globals_table[] = {
|
STATIC const mp_rom_map_elem_t mp_module_gc_globals_table[] = {
|
||||||
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_gc) },
|
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_gc) },
|
||||||
{ MP_ROM_QSTR(MP_QSTR_collect), MP_ROM_PTR(&gc_collect_obj) },
|
{ MP_ROM_QSTR(MP_QSTR_collect), MP_ROM_PTR(&gc_collect_obj) },
|
||||||
@ -91,6 +110,9 @@ STATIC const mp_rom_map_elem_t mp_module_gc_globals_table[] = {
|
|||||||
{ MP_ROM_QSTR(MP_QSTR_isenabled), MP_ROM_PTR(&gc_isenabled_obj) },
|
{ MP_ROM_QSTR(MP_QSTR_isenabled), MP_ROM_PTR(&gc_isenabled_obj) },
|
||||||
{ MP_ROM_QSTR(MP_QSTR_mem_free), MP_ROM_PTR(&gc_mem_free_obj) },
|
{ MP_ROM_QSTR(MP_QSTR_mem_free), MP_ROM_PTR(&gc_mem_free_obj) },
|
||||||
{ MP_ROM_QSTR(MP_QSTR_mem_alloc), MP_ROM_PTR(&gc_mem_alloc_obj) },
|
{ MP_ROM_QSTR(MP_QSTR_mem_alloc), MP_ROM_PTR(&gc_mem_alloc_obj) },
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
{ MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&gc_threshold_obj) },
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
STATIC MP_DEFINE_CONST_DICT(mp_module_gc_globals, mp_module_gc_globals_table);
|
STATIC MP_DEFINE_CONST_DICT(mp_module_gc_globals, mp_module_gc_globals_table);
|
||||||
|
@ -107,6 +107,12 @@
|
|||||||
#define MICROPY_ALLOC_GC_STACK_SIZE (64)
|
#define MICROPY_ALLOC_GC_STACK_SIZE (64)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
// Support automatic GC when reaching allocation threshold,
|
||||||
|
// configurable by gc.threshold().
|
||||||
|
#ifndef MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
#define MICROPY_GC_ALLOC_THRESHOLD (1)
|
||||||
|
#endif
|
||||||
|
|
||||||
// Number of bytes to allocate initially when creating new chunks to store
|
// Number of bytes to allocate initially when creating new chunks to store
|
||||||
// interned string data. Smaller numbers lead to more chunks being needed
|
// interned string data. Smaller numbers lead to more chunks being needed
|
||||||
// and more wastage at the end of the chunk. Larger numbers lead to wasted
|
// and more wastage at the end of the chunk. Larger numbers lead to wasted
|
||||||
|
@ -76,6 +76,11 @@ typedef struct _mp_state_mem_t {
|
|||||||
// you can still allocate/free memory and also explicitly call gc_collect.
|
// you can still allocate/free memory and also explicitly call gc_collect.
|
||||||
uint16_t gc_auto_collect_enabled;
|
uint16_t gc_auto_collect_enabled;
|
||||||
|
|
||||||
|
#if MICROPY_GC_ALLOC_THRESHOLD
|
||||||
|
size_t gc_alloc_amount;
|
||||||
|
size_t gc_alloc_threshold;
|
||||||
|
#endif
|
||||||
|
|
||||||
size_t gc_last_free_atb_index;
|
size_t gc_last_free_atb_index;
|
||||||
|
|
||||||
#if MICROPY_PY_GC_COLLECT_RETVAL
|
#if MICROPY_PY_GC_COLLECT_RETVAL
|
||||||
|
Loading…
Reference in New Issue
Block a user