Introduce a long lived section of the heap.
This adapts the allocation process to start from either end of the heap when searching for free space. The default behavior is identical to the existing behavior: it starts with the lowest block and looks higher. Now it can also start with the highest block and look lower, depending on the long_lived parameter to gc_alloc.

As the heap fills, the two sections may overlap. When they overlap, a collect may be triggered in order to keep the long lived section compact. However, free space is always eligible for each type of allocation. By starting from either end of the heap we gain the ability to separate short lived objects from long lived ones. This separation reduces heap fragmentation because long lived objects are easy to pack densely.

Most objects are short lived initially but may be made long lived when they are referenced by a type or module. This involves copying the memory and then letting the collect phase free the old portion. QSTR pools and chunks are always long lived because they are never freed.

The reallocation, collection and free processes are largely unchanged. They simply also maintain an index to the highest free block as well as the lowest. These indices are used to speed up the allocation search until the next collect.

In practice, this change may slightly slow down import statements, with the benefit that memory is much less fragmented afterwards. For example, a test import into a 20k heap that leaves ~6k free previously had a largest contiguous free space of ~400 bytes. After this change, the largest contiguous free space is over 3400 bytes.
parent 56bd0789af
commit 416abe33ed
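Before the diff itself, a rough sketch of the two-ended search the message describes may help. This is an illustrative toy, not the gc.c implementation, and every name in it is invented: short lived requests scan a block table upward from the bottom, long lived ones scan downward from the top, so each kind of object stays packed at its own end.

#include <stdbool.h>
#include <stddef.h>

#define N_BLOCKS 64
static unsigned char used[N_BLOCKS]; // toy block table: 0 = free, 1 = used

// Returns the first block of a free run of n blocks, or -1 if none is found.
static int toy_alloc(size_t n, bool long_lived) {
    int dir = long_lived ? -1 : 1;
    size_t run = 0;
    for (int i = long_lived ? N_BLOCKS - 1 : 0; i >= 0 && i < N_BLOCKS; i += dir) {
        run = used[i] ? 0 : run + 1;
        if (run == n) {
            // An upward scan ends its run at i; a downward scan starts it there.
            int start = long_lived ? i : i - (int)n + 1;
            for (size_t k = 0; k < n; k++) {
                used[start + k] = 1;
            }
            return start;
        }
    }
    return -1; // no space: the real allocator would run gc_collect() and retry
}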
py/builtinimport.c
@@ -30,6 +30,7 @@
 #include <assert.h>

 #include "py/compile.h"
+#include "py/gc_long_lived.h"
 #include "py/objmodule.h"
 #include "py/persistentcode.h"
 #include "py/runtime.h"
@@ -144,6 +145,7 @@ STATIC void do_load_from_lexer(mp_obj_t module_obj, mp_lexer_t *lex) {
     // parse, compile and execute the module in its context
     mp_obj_dict_t *mod_globals = mp_obj_module_get_globals(module_obj);
     mp_parse_compile_execute(lex, MP_PARSE_FILE_INPUT, mod_globals, mod_globals);
+    mp_obj_module_set_globals(module_obj, make_dict_long_lived(mod_globals, 10));
 }
 #endif

@@ -173,6 +175,8 @@ STATIC void do_execute_raw_code(mp_obj_t module_obj, mp_raw_code_t *raw_code) {

         // finish nlr block, restore context
         nlr_pop();
+        mp_obj_module_set_globals(module_obj,
+            make_dict_long_lived(mp_obj_module_get_globals(module_obj), 10));
         mp_globals_set(old_globals);
         mp_locals_set(old_locals);
     } else {
@@ -468,6 +472,10 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
             if (outer_module_obj != MP_OBJ_NULL) {
                 qstr s = qstr_from_strn(mod_str + last, i - last);
                 mp_store_attr(outer_module_obj, s, module_obj);
+                // The above store can cause a dictionary rehash and new allocation. So,
+                // let's make sure the globals dictionary is still long lived.
+                mp_obj_module_set_globals(outer_module_obj,
+                    make_dict_long_lived(mp_obj_module_get_globals(outer_module_obj), 10));
             }
             outer_module_obj = module_obj;
             if (top_module_obj == MP_OBJ_NULL) {
py/gc.c (177 changed lines)
@@ -41,6 +41,9 @@
 #define DEBUG_printf(...) (void)0
 #endif

+// Uncomment this if you want to use a debugger to capture state at every allocation and free.
+// #define LOG_HEAP_ACTIVITY 1
+
 // make this 1 to dump the heap each time it changes
 #define EXTENSIVE_HEAP_PROFILING (0)

@@ -59,15 +62,6 @@
 #define AT_MARK (3)

-#define BLOCKS_PER_ATB (4)
-#define ATB_MASK_0 (0x03)
-#define ATB_MASK_1 (0x0c)
-#define ATB_MASK_2 (0x30)
-#define ATB_MASK_3 (0xc0)

-#define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
-#define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
-#define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
-#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)

 #define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
 #define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
@@ -152,14 +146,19 @@ void gc_init(void *start, void *end) {
     memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
 #endif

-    // set last free ATB index to start of heap
-    MP_STATE_MEM(gc_last_free_atb_index) = 0;
+    // Set first free ATB index to the start of the heap.
+    MP_STATE_MEM(gc_first_free_atb_index) = 0;
+    // Set last free ATB index to the end of the heap.
+    MP_STATE_MEM(gc_last_free_atb_index) = MP_STATE_MEM(gc_alloc_table_byte_len) - 1;
+    // Set the lowest long lived ptr to the end of the heap to start. This will be lowered as long
+    // lived objects are allocated.
+    MP_STATE_MEM(gc_lowest_long_lived_ptr) = (void*) PTR_FROM_BLOCK(MP_STATE_MEM(gc_alloc_table_byte_len * BLOCKS_PER_ATB));

     // unlock the GC
     MP_STATE_MEM(gc_lock_depth) = 0;

     // allow auto collection
-    MP_STATE_MEM(gc_auto_collect_enabled) = 1;
+    MP_STATE_MEM(gc_auto_collect_enabled) = true;

 #if MICROPY_GC_ALLOC_THRESHOLD
     // by default, maxuint for gc threshold, effectively turning gc-by-threshold off
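A side note on why the ATB_MASK_x/ATB_x_IS_FREE macros could be deleted above: the rewritten search loop in gc_alloc (further down) computes the same per-position test with a shifted mask. A standalone check of that equivalence, assuming only the 2-bits-per-block encoding gc.c already uses:

#include <assert.h>

int main(void) {
    // One allocation table byte holding four 2-bit block states: here block 2
    // is in use (bit pattern 0x30) and blocks 0, 1 and 3 are free.
    unsigned char a = 0x30;
    for (int j = 0; j < 4; j++) {
        // Same test as the old ATB_x_IS_FREE(a) macros, with the mask computed
        // from the block position instead of spelled out per position.
        int is_free = (a & (0x3 << (j * 2))) == 0;
        assert(is_free == (j != 2));
    }
    return 0;
}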
@@ -288,6 +287,7 @@ STATIC void gc_sweep(void) {
                 }
 #endif
                 free_tail = 1;
+                ATB_ANY_TO_FREE(block);
                 DEBUG_printf("gc_sweep(%x)\n", PTR_FROM_BLOCK(block));

 #ifdef LOG_HEAP_ACTIVITY
@@ -296,7 +296,7 @@ STATIC void gc_sweep(void) {
 #if MICROPY_PY_GC_COLLECT_RETVAL
                 MP_STATE_MEM(gc_collected)++;
 #endif
-                // fall through to free the head
+                break;

             case AT_TAIL:
                 if (free_tail) {
@@ -338,7 +338,8 @@ void gc_collect_root(void **ptrs, size_t len) {
 void gc_collect_end(void) {
     gc_deal_with_stack_overflow();
     gc_sweep();
-    MP_STATE_MEM(gc_last_free_atb_index) = 0;
+    MP_STATE_MEM(gc_first_free_atb_index) = 0;
+    MP_STATE_MEM(gc_last_free_atb_index) = MP_STATE_MEM(gc_alloc_table_byte_len) - 1;
     MP_STATE_MEM(gc_lock_depth)--;
     GC_EXIT();
 }
@@ -407,7 +408,9 @@ void gc_info(gc_info_t *info) {
     GC_EXIT();
 }

-void *gc_alloc(size_t n_bytes, bool has_finaliser) {
+// We place long lived objects at the end of the heap rather than the start. This reduces
+// fragmentation by localizing the heap churn to one portion of memory (the start of the heap.)
+void *gc_alloc(size_t n_bytes, bool has_finaliser, bool long_lived) {
     size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
     DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);

@@ -424,29 +427,62 @@ void *gc_alloc(size_t n_bytes, bool has_finaliser) {
         return NULL;
     }

-    size_t i;
+    size_t found_block = 0xffffffff;
     size_t end_block;
     size_t start_block;
-    size_t n_free = 0;
-    int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
+    size_t n_free;
+    bool collected = !MP_STATE_MEM(gc_auto_collect_enabled);

 #if MICROPY_GC_ALLOC_THRESHOLD
     if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
         GC_EXIT();
         gc_collect();
         GC_ENTER();
         collected = true;
     }
 #endif

-    for (;;) {
+    bool keep_looking = true;
+
+    // When we start searching on the other side of the crossover block we make sure to
+    // perform a collect. That way we'll get the closest free block in our section.
+    size_t crossover_block = BLOCK_FROM_PTR(MP_STATE_MEM(gc_lowest_long_lived_ptr));
+    while (keep_looking) {
+        int8_t direction = 1;
+        size_t start = MP_STATE_MEM(gc_first_free_atb_index);
+        if (long_lived) {
+            direction = -1;
+            start = MP_STATE_MEM(gc_last_free_atb_index);
+        }
+        n_free = 0;
         // look for a run of n_blocks available blocks
-        for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
+        for (size_t i = start; keep_looking && MP_STATE_MEM(gc_first_free_atb_index) <= i && i <= MP_STATE_MEM(gc_last_free_atb_index); i += direction) {
             byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
-            if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
-            if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
-            if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
-            if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
+            // Four ATB states are packed into a single byte.
+            int j = 0;
+            if (direction == -1) {
+                j = 3;
+            }
+            for (; keep_looking && 0 <= j && j <= 3; j += direction) {
+                if ((a & (0x3 << (j * 2))) == 0) {
+                    if (++n_free >= n_blocks) {
+                        found_block = i * BLOCKS_PER_ATB + j;
+                        keep_looking = false;
+                    }
+                } else {
+                    if (!collected) {
+                        size_t block = i * BLOCKS_PER_ATB + j;
+                        if ((direction == 1 && block >= crossover_block) ||
+                            (direction == -1 && block < crossover_block)) {
+                            keep_looking = false;
+                        }
+                    }
+                    n_free = 0;
+                }
+            }
+        }
+        if (n_free >= n_blocks) {
+            break;
+        }

         GC_EXIT();
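The crossover test inside the loop above is what triggers an early collect: before any collect has happened, a scan refuses to pass a used block on the other side of gc_lowest_long_lived_ptr, falls out of the loop, and lets the gc_collect() path below run. The same rule in isolation (a sketch; the helper name is invented):

#include <stdbool.h>
#include <stddef.h>

// A short lived scan (direction == 1) gives up at the first used block at or
// past the crossover; a long lived scan (direction == -1) gives up below it.
// After a collect has already run, any free run anywhere is acceptable.
static bool should_stop_scan(int direction, size_t block,
                             size_t crossover_block, bool collected) {
    if (collected) {
        return false;
    }
    return (direction == 1 && block >= crossover_block) ||
           (direction == -1 && block < crossover_block);
}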
@@ -456,23 +492,31 @@ void *gc_alloc(size_t n_bytes, bool has_finaliser) {
         }
         DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
         gc_collect();
-        collected = 1;
+        collected = true;
+        // Try again since we've hopefully freed up space.
+        keep_looking = true;
         GC_ENTER();
     }
+    assert(found_block != 0xffffffff);

-    // found, ending at block i inclusive
-found:
-    // get starting and end blocks, both inclusive
-    end_block = i;
-    start_block = i - n_free + 1;
-
-    // Set last free ATB index to block after last block we found, for start of
+    // Found free space ending at found_block inclusive.
+    // Also, set last free ATB index to block after last block we found, for start of
     // next scan. To reduce fragmentation, we only do this if we were looking
     // for a single free block, which guarantees that there are no free blocks
-    // before this one. Also, whenever we free or shink a block we must check
+    // before this one. Also, whenever we free or shrink a block we must check
     // if this index needs adjusting (see gc_realloc and gc_free).
-    if (n_free == 1) {
-        MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
+    if (!long_lived) {
+        end_block = found_block;
+        start_block = found_block - n_free + 1;
+        if (n_blocks == 1) {
+            MP_STATE_MEM(gc_first_free_atb_index) = (found_block + 1) / BLOCKS_PER_ATB;
+        }
+    } else {
+        start_block = found_block;
+        end_block = found_block + n_free - 1;
+        if (n_blocks == 1) {
+            MP_STATE_MEM(gc_last_free_atb_index) = (found_block - 1) / BLOCKS_PER_ATB;
+        }
     }

 #ifdef LOG_HEAP_ACTIVITY
@@ -493,6 +537,13 @@ found:
     void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
     DEBUG_printf("gc_alloc(%p)\n", ret_ptr);

+    // If the allocation was long lived then update the lowest value. It's used to trigger early
+    // collects when allocations fail in their respective section. It's also used to ignore calls to
+    // gc_make_long_lived where the pointer is already in the long lived section.
+    if (long_lived && ret_ptr < MP_STATE_MEM(gc_lowest_long_lived_ptr)) {
+        MP_STATE_MEM(gc_lowest_long_lived_ptr) = ret_ptr;
+    }
+
 #if MICROPY_GC_ALLOC_THRESHOLD
     MP_STATE_MEM(gc_alloc_amount) += n_blocks;
 #endif
@@ -566,7 +617,10 @@ void gc_free(void *ptr) {
 #endif

         // set the last_free pointer to this block if it's earlier in the heap
-        if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
+        if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_first_free_atb_index)) {
+            MP_STATE_MEM(gc_first_free_atb_index) = block / BLOCKS_PER_ATB;
+        }
+        if (block / BLOCKS_PER_ATB > MP_STATE_MEM(gc_last_free_atb_index)) {
             MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
         }

@@ -607,6 +661,50 @@ size_t gc_nbytes(const void *ptr) {
     return 0;
 }

+bool gc_has_finaliser(const void *ptr) {
+#if MICROPY_ENABLE_FINALISER
+    GC_ENTER();
+    if (VERIFY_PTR(ptr)) {
+        bool has_finaliser = FTB_GET(BLOCK_FROM_PTR(ptr));
+        GC_EXIT();
+        return has_finaliser;
+    }
+
+    // invalid pointer
+    GC_EXIT();
+#else
+    (void) ptr;
+#endif
+    return false;
+}
+
+void *gc_make_long_lived(void *old_ptr) {
+    // If it's already in the long lived section then don't bother moving it.
+    if (old_ptr >= MP_STATE_MEM(gc_lowest_long_lived_ptr)) {
+        return old_ptr;
+    }
+    size_t n_bytes = gc_nbytes(old_ptr);
+    if (n_bytes == 0) {
+        return old_ptr;
+    }
+    bool has_finaliser = gc_has_finaliser(old_ptr);
+
+    // Try and find a new area in the long lived section to copy the memory to.
+    void* new_ptr = gc_alloc(n_bytes, has_finaliser, true);
+    if (new_ptr == NULL) {
+        return old_ptr;
+    } else if (old_ptr > new_ptr) {
+        // Return the old pointer if the new one is lower in the heap and free the new space.
+        gc_free(new_ptr);
+        return old_ptr;
+    }
+    // We copy everything over and let the garbage collection process delete the old copy. That way
+    // we ensure we don't delete memory that has a second reference. (Though if there is we may
+    // confuse things when it's mutable.)
+    memcpy(new_ptr, old_ptr, n_bytes);
+    return new_ptr;
+}
+
 #if 0
 // old, simple realloc that didn't expand memory in place
 void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
@@ -639,7 +737,7 @@ void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
 void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
     // check for pure allocation
     if (ptr_in == NULL) {
-        return gc_alloc(n_bytes, false);
+        return gc_alloc(n_bytes, false, false);
     }

     // check for pure free
@@ -714,7 +812,10 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
         }

         // set the last_free pointer to end of this block if it's earlier in the heap
-        if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
+        if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_first_free_atb_index)) {
+            MP_STATE_MEM(gc_first_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
+        }
+        if ((block + new_blocks) / BLOCKS_PER_ATB > MP_STATE_MEM(gc_last_free_atb_index)) {
             MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
         }

@@ -774,7 +875,7 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
     }

     // can't resize inplace; try to find a new contiguous chain
-    void *ptr_out = gc_alloc(n_bytes, ftb_state);
+    void *ptr_out = gc_alloc(n_bytes, ftb_state, false);

     // check that the alloc succeeded
     if (ptr_out == NULL) {
py/gc.h (4 changed lines)
@@ -45,9 +45,11 @@ void gc_collect_start(void);
 void gc_collect_root(void **ptrs, size_t len);
 void gc_collect_end(void);

-void *gc_alloc(size_t n_bytes, bool has_finaliser);
+void *gc_alloc(size_t n_bytes, bool has_finaliser, bool long_lived);
 void gc_free(void *ptr); // does not call finaliser
 size_t gc_nbytes(const void *ptr);
+bool gc_has_finaliser(const void *ptr);
+void *gc_make_long_lived(void *old_ptr);
 void *gc_realloc(void *ptr, size_t n_bytes, bool allow_move);

 typedef struct _gc_info_t {
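The gc_make_long_lived contract declared above is copy-without-free: the data is copied toward the top of the heap (or the original pointer is returned when moving would not help), and the old block is left for the next collect, so a caller must adopt the returned pointer everywhere the old one was stored. A minimal caller sketch; the helper is hypothetical:

#include "py/gc.h"
#include "py/misc.h"

// Hypothetical helper: promote a scratch buffer that turned out to be permanent.
static byte *promote_buffer(byte *buf) {
    // From here on only the returned pointer may be used; the old copy is
    // reclaimed by the next collect.
    return gc_make_long_lived(buf);
}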
py/gc_long_lived.c (new file, 132 lines)
@@ -0,0 +1,132 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/emitglue.h"
+#include "py/gc_long_lived.h"
+#include "py/gc.h"
+
+mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth) {
+#ifndef MICROPY_ENABLE_GC
+    return fun_bc;
+#endif
+    if (fun_bc == NULL || fun_bc == mp_const_none || max_depth == 0) {
+        return fun_bc;
+    }
+    fun_bc->bytecode = gc_make_long_lived((byte*) fun_bc->bytecode);
+    fun_bc->globals = make_dict_long_lived(fun_bc->globals, max_depth - 1);
+    for (uint32_t i = 0; i < gc_nbytes(fun_bc->const_table) / sizeof(mp_obj_t); i++) {
+        // Skip things that aren't allocated on the heap (and hence have zero bytes.)
+        if (gc_nbytes((byte *)fun_bc->const_table[i]) == 0) {
+            continue;
+        }
+        // Try to detect raw code.
+        mp_raw_code_t* raw_code = MP_OBJ_TO_PTR(fun_bc->const_table[i]);
+        if (raw_code->kind == MP_CODE_BYTECODE) {
+            raw_code->data.u_byte.bytecode = gc_make_long_lived((byte*) raw_code->data.u_byte.bytecode);
+            // TODO(tannewt): Do we actually want to recurse here?
+            raw_code->data.u_byte.const_table = gc_make_long_lived((byte*) raw_code->data.u_byte.const_table);
+        }
+        ((mp_uint_t *) fun_bc->const_table)[i] = (mp_uint_t) make_obj_long_lived(
+            (mp_obj_t) fun_bc->const_table[i], max_depth - 1);
+    }
+    fun_bc->const_table = gc_make_long_lived((mp_uint_t*) fun_bc->const_table);
+    // extra_args stores keyword only argument default values.
+    size_t words = gc_nbytes(fun_bc) / sizeof(mp_uint_t*);
+    for (size_t i = 0; i < words - 4; i++) {
+        if (fun_bc->extra_args[i] == NULL) {
+            continue;
+        }
+        if (MP_OBJ_IS_TYPE(fun_bc->extra_args[i], &mp_type_dict)) {
+            fun_bc->extra_args[i] = make_dict_long_lived(fun_bc->extra_args[i], max_depth - 1);
+        } else {
+            fun_bc->extra_args[i] = make_obj_long_lived(fun_bc->extra_args[i], max_depth - 1);
+        }
+    }
+    return gc_make_long_lived(fun_bc);
+}
+
+mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth) {
+#ifndef MICROPY_ENABLE_GC
+    return prop;
+#endif
+    if (max_depth == 0) {
+        return prop;
+    }
+    prop->proxy[0] = make_fun_bc_long_lived((mp_obj_fun_bc_t*) prop->proxy[0], max_depth - 1);
+    prop->proxy[1] = make_fun_bc_long_lived((mp_obj_fun_bc_t*) prop->proxy[1], max_depth - 1);
+    prop->proxy[2] = make_fun_bc_long_lived((mp_obj_fun_bc_t*) prop->proxy[2], max_depth - 1);
+    return gc_make_long_lived(prop);
+}
+
+mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth) {
+#ifndef MICROPY_ENABLE_GC
+    return dict;
+#endif
+    if (dict == NULL || max_depth == 0) {
+        return dict;
+    }
+    // Update all of the references first so that we reduce the chance of references to the old
+    // copies.
+    dict->map.table = gc_make_long_lived(dict->map.table);
+    for (size_t i = 0; i < dict->map.alloc; i++) {
+        if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) {
+            mp_obj_t value = dict->map.table[i].value;
+            dict->map.table[i].value = make_obj_long_lived(value, max_depth - 1);
+        }
+    }
+    return gc_make_long_lived(dict);
+}
+
+mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str) {
+    str->data = gc_make_long_lived((byte *) str->data);
+    return gc_make_long_lived(str);
+}
+
+mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth) {
+#ifndef MICROPY_ENABLE_GC
+    return obj;
+#endif
+    if (obj == NULL) {
+        return obj;
+    }
+    if (MP_OBJ_IS_TYPE(obj, &mp_type_fun_bc)) {
+        mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(obj);
+        return MP_OBJ_FROM_PTR(make_fun_bc_long_lived(fun_bc, max_depth));
+    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_property)) {
+        mp_obj_property_t *prop = MP_OBJ_TO_PTR(obj);
+        return MP_OBJ_FROM_PTR(make_property_long_lived(prop, max_depth));
+    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_str)) {
+        mp_obj_str_t *str = MP_OBJ_TO_PTR(obj);
+        return MP_OBJ_FROM_PTR(make_str_long_lived(str));
+    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_type)) {
+        // Types are already long lived during creation.
+        return obj;
+    } else {
+        return gc_make_long_lived(obj);
+    }
+}
py/gc_long_lived.h (new file, 43 lines)
@@ -0,0 +1,43 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// These helpers move MicroPython objects and their sub-objects to the long lived portion of the
+// heap.
+
+#ifndef MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
+#define MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
+
+#include "py/objfun.h"
+#include "py/objproperty.h"
+#include "py/objstr.h"
+
+mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth);
+mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth);
+mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth);
+mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str);
+mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth);
+
+#endif // MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
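The typical call pattern for these helpers is the one used in py/builtinimport.c above: once a module has finished executing, its globals dict is promoted wholesale, with a max_depth of 10 bounding the recursion. A condensed sketch (the wrapper function name is invented):

#include "py/gc_long_lived.h"
#include "py/obj.h"

// Invented wrapper showing the pattern from builtinimport.c: promote a module's
// globals (and objects reachable from them, up to 10 levels deep) after import.
static void promote_module(mp_obj_t module_obj) {
    mp_obj_dict_t *globals = mp_obj_module_get_globals(module_obj);
    mp_obj_module_set_globals(module_obj, make_dict_long_lived(globals, 10));
}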
py/malloc.c (19 changed lines)
@@ -53,12 +53,15 @@
 #undef malloc
 #undef free
 #undef realloc
-#define malloc(b) gc_alloc((b), false)
-#define malloc_with_finaliser(b) gc_alloc((b), true)
+#define malloc_ll(b, ll) gc_alloc((b), false, (ll))
+#define malloc_with_finaliser(b) gc_alloc((b), true, false)
 #define free gc_free
 #define realloc(ptr, n) gc_realloc(ptr, n, true)
 #define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv)
 #else
+#define malloc_ll(b, ll) malloc(b)
 #define malloc_with_finaliser(b) malloc((b))

 STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
     if (allow_move) {
         return realloc(ptr, n_bytes);
@@ -71,8 +74,8 @@ STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
 }
 #endif // MICROPY_ENABLE_GC

-void *m_malloc(size_t num_bytes) {
-    void *ptr = malloc(num_bytes);
+void *m_malloc(size_t num_bytes, bool long_lived) {
+    void *ptr = malloc_ll(num_bytes, long_lived);
     if (ptr == NULL && num_bytes != 0) {
         m_malloc_fail(num_bytes);
     }
@@ -85,8 +88,8 @@ void *m_malloc(size_t num_bytes) {
     return ptr;
 }

-void *m_malloc_maybe(size_t num_bytes) {
-    void *ptr = malloc(num_bytes);
+void *m_malloc_maybe(size_t num_bytes, bool long_lived) {
+    void *ptr = malloc_ll(num_bytes, long_lived);
 #if MICROPY_MEM_STATS
     MP_STATE_MEM(total_bytes_allocated) += num_bytes;
     MP_STATE_MEM(current_bytes_allocated) += num_bytes;
@@ -112,8 +115,8 @@ void *m_malloc_with_finaliser(size_t num_bytes) {
 }
 #endif

-void *m_malloc0(size_t num_bytes) {
-    void *ptr = m_malloc(num_bytes);
+void *m_malloc0(size_t num_bytes, bool long_lived) {
+    void *ptr = m_malloc(num_bytes, long_lived);
     if (ptr == NULL && num_bytes != 0) {
         m_malloc_fail(num_bytes);
     }
py/misc.h (21 changed lines)
@@ -56,13 +56,18 @@ typedef unsigned int uint;

 // TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element)

-#define m_new(type, num) ((type*)(m_malloc(sizeof(type) * (num))))
-#define m_new_maybe(type, num) ((type*)(m_malloc_maybe(sizeof(type) * (num))))
-#define m_new0(type, num) ((type*)(m_malloc0(sizeof(type) * (num))))
+#define m_new(type, num) ((type*)(m_malloc(sizeof(type) * (num), false)))
+#define m_new_ll(type, num) ((type*)(m_malloc(sizeof(type) * (num), true)))
+#define m_new_maybe(type, num) ((type*)(m_malloc_maybe(sizeof(type) * (num), false)))
+#define m_new_ll_maybe(type, num) ((type*)(m_malloc_maybe(sizeof(type) * (num), true)))
+#define m_new0(type, num) ((type*)(m_malloc0(sizeof(type) * (num), false)))
+#define m_new0_ll(type, num) ((type*)(m_malloc0(sizeof(type) * (num), true)))
 #define m_new_obj(type) (m_new(type, 1))
+#define m_new_ll_obj(type) (m_new_ll(type, 1))
 #define m_new_obj_maybe(type) (m_new_maybe(type, 1))
-#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type*)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num)))
-#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type*)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num)))
+#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type*)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num), false))
+#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type*)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num), false))
+#define m_new_ll_obj_var_maybe(obj_type, var_type, var_num) ((obj_type*)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num), true))
 #if MICROPY_ENABLE_FINALISER
 #define m_new_obj_with_finaliser(type) ((type*)(m_malloc_with_finaliser(sizeof(type))))
 #else
@@ -81,10 +86,10 @@ typedef unsigned int uint;
 #endif
 #define m_del_obj(type, ptr) (m_del(type, ptr, 1))

-void *m_malloc(size_t num_bytes);
-void *m_malloc_maybe(size_t num_bytes);
+void *m_malloc(size_t num_bytes, bool long_lived);
+void *m_malloc_maybe(size_t num_bytes, bool long_lived);
 void *m_malloc_with_finaliser(size_t num_bytes);
-void *m_malloc0(size_t num_bytes);
+void *m_malloc0(size_t num_bytes, bool long_lived);
 #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
 void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes);
 void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move);
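The new _ll macro variants simply thread a long_lived flag through m_malloc down to gc_alloc. A sketch of the intended split (the struct and function are hypothetical, for illustration only):

#include "py/misc.h"

// Hypothetical record type, just for illustration.
typedef struct _entry_t {
    int key;
    int value;
} entry_t;

void example_allocations(void) {
    // Scratch data that will be freed soon: allocated low in the heap, as before.
    entry_t *tmp = m_new(entry_t, 4);
    // Data that lives until the VM resets (interned tables, module state):
    // allocated from the top of the heap so it packs densely.
    entry_t *perm = m_new_ll(entry_t, 4);
    m_del(entry_t, tmp, 4);
    (void)perm;
}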
@@ -79,7 +79,7 @@ STATIC mp_obj_t mp_builtin___build_class__(size_t n_args, const mp_obj_t *args)
     meta_args[2] = class_locals; // dict of members
     mp_obj_t new_class = mp_call_function_n_kw(meta, 3, 0, meta_args);

-    // store into cell if neede
+    // store into cell if needed
     if (cell != mp_const_none) {
         mp_obj_cell_set(cell, new_class);
     }
py/mpstate.h
@@ -76,21 +76,24 @@ typedef struct _mp_state_mem_t {
     byte *gc_pool_start;
     byte *gc_pool_end;

+    void *gc_lowest_long_lived_ptr;
+
     int gc_stack_overflow;
     size_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
     size_t *gc_sp;
     uint16_t gc_lock_depth;

-    // This variable controls auto garbage collection. If set to 0 then the
+    // This variable controls auto garbage collection. If set to false then the
     // GC won't automatically run when gc_alloc can't find enough blocks. But
     // you can still allocate/free memory and also explicitly call gc_collect.
-    uint16_t gc_auto_collect_enabled;
+    bool gc_auto_collect_enabled;

 #if MICROPY_GC_ALLOC_THRESHOLD
     size_t gc_alloc_amount;
     size_t gc_alloc_threshold;
 #endif

+    size_t gc_first_free_atb_index;
     size_t gc_last_free_atb_index;

 #if MICROPY_PY_GC_COLLECT_RETVAL
py/obj.h (1 changed line)
@@ -816,6 +816,7 @@ typedef struct _mp_obj_module_t {
     mp_obj_dict_t *globals;
 } mp_obj_module_t;
 mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in);
+void mp_obj_module_set_globals(mp_obj_t self_in, mp_obj_dict_t *globals);
 // check if given module object is a package
 bool mp_obj_is_package(mp_obj_t module);

py/objmodule.c
@@ -27,6 +27,7 @@
 #include <stdlib.h>
 #include <assert.h>

+#include "py/gc.h"
 #include "py/objmodule.h"
 #include "py/runtime.h"
 #include "py/builtin.h"
@@ -84,8 +85,9 @@ STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
             mp_obj_dict_delete(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr));
         } else {
             // store attribute
+            mp_obj_t long_lived = gc_make_long_lived(dest[1]);
             // TODO CPython allows STORE_ATTR to a module, but is this the correct implementation?
-            mp_obj_dict_store(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr), dest[1]);
+            mp_obj_dict_store(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr), long_lived);
         }
         dest[0] = MP_OBJ_NULL; // indicate success
     }
@@ -108,9 +110,9 @@ mp_obj_t mp_obj_new_module(qstr module_name) {
     }

     // create new module object
-    mp_obj_module_t *o = m_new_obj(mp_obj_module_t);
+    mp_obj_module_t *o = m_new_ll_obj(mp_obj_module_t);
     o->base.type = &mp_type_module;
-    o->globals = MP_OBJ_TO_PTR(mp_obj_new_dict(MICROPY_MODULE_DICT_SIZE));
+    o->globals = MP_OBJ_TO_PTR(gc_make_long_lived(mp_obj_new_dict(MICROPY_MODULE_DICT_SIZE)));

     // store __name__ entry in the module
     mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(module_name));
@@ -128,6 +130,12 @@ mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in) {
     return self->globals;
 }

+void mp_obj_module_set_globals(mp_obj_t self_in, mp_obj_dict_t *globals) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_module));
+    mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+    self->globals = globals;
+}
+
 /******************************************************************************/
 // Global module table and related functions
py/objtype.c
@@ -30,6 +30,7 @@
 #include <string.h>
 #include <assert.h>

+#include "py/gc_long_lived.h"
 #include "py/objtype.h"
 #include "py/runtime.h"

@@ -960,7 +961,7 @@ STATIC void type_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
         mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
         // note that locals_map may be in ROM, so add will fail in that case
         if (elem != NULL) {
-            elem->value = dest[1];
+            elem->value = make_obj_long_lived(dest[1], 10);
             dest[0] = MP_OBJ_NULL; // indicate success
         }
     }
@@ -1002,7 +1003,7 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict)
         }
     }

-    mp_obj_type_t *o = m_new0(mp_obj_type_t, 1);
+    mp_obj_type_t *o = m_new0_ll(mp_obj_type_t, 1);
     o->base.type = &mp_type_type;
     o->name = name;
     o->print = instance_print;
@@ -1030,7 +1031,7 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict)
         }
     }

-    o->locals_dict = MP_OBJ_TO_PTR(locals_dict);
+    o->locals_dict = make_dict_long_lived(locals_dict, 10);

     const mp_obj_type_t *native_base;
     size_t num_native_bases = instance_count_native_bases(o, &native_base);
py/py.mk (1 changed line)
@@ -110,6 +110,7 @@ PY_O_BASENAME = \
 	nlrsetjmp.o \
 	malloc.o \
 	gc.o \
+	gc_long_lived.o \
 	qstr.o \
 	vstr.o \
 	mpprint.o \
py/qstr.c
@@ -28,6 +28,7 @@
 #include <string.h>
 #include <stdio.h>

+#include "py/gc.h"
 #include "py/mpstate.h"
 #include "py/qstr.h"
 #include "py/gc.h"
@@ -143,7 +144,7 @@ STATIC qstr qstr_add(const byte *q_ptr) {

     // make sure we have room in the pool for a new qstr
     if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
-        qstr_pool_t *pool = m_new_obj_var_maybe(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2);
+        qstr_pool_t *pool = m_new_ll_obj_var_maybe(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2);
         if (pool == NULL) {
             QSTR_EXIT();
             m_malloc_fail(MP_STATE_VM(last_pool)->alloc * 2);
@@ -213,10 +214,10 @@ qstr qstr_from_strn(const char *str, size_t len) {
             if (al < MICROPY_ALLOC_QSTR_CHUNK_INIT) {
                 al = MICROPY_ALLOC_QSTR_CHUNK_INIT;
             }
-            MP_STATE_VM(qstr_last_chunk) = m_new_maybe(byte, al);
+            MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(byte, al);
             if (MP_STATE_VM(qstr_last_chunk) == NULL) {
                 // failed to allocate a large chunk so try with exact size
-                MP_STATE_VM(qstr_last_chunk) = m_new_maybe(byte, n_bytes);
+                MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(byte, n_bytes);
                 if (MP_STATE_VM(qstr_last_chunk) == NULL) {
                     QSTR_EXIT();
                     m_malloc_fail(n_bytes);
@@ -258,7 +259,7 @@ qstr qstr_build_end(byte *q_ptr) {
         mp_uint_t hash = qstr_compute_hash(Q_GET_DATA(q_ptr), len);
         Q_SET_HASH(q_ptr, hash);
         q_ptr[MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len] = '\0';
-        q = qstr_add(q_ptr);
+        q = qstr_add(gc_make_long_lived(q_ptr));
     } else {
         m_del(byte, q_ptr, Q_GET_ALLOC(q_ptr));
     }