wip; remove long-lived functionality; remove PR#2614

Trinket M0 builds with no compilation errors, but has link errors to fix.
This commit is contained in:
Dan Halbert 2023-08-08 20:41:17 -04:00
parent 0d2c3c3f08
commit 2ff8667e75
41 changed files with 244 additions and 605 deletions

View File

@ -55,8 +55,7 @@ void common_hal_bleio_characteristic_buffer_construct(bleio_characteristic_buffe
self->characteristic = characteristic;
self->timeout_ms = timeout * 1000;
// This is a macro.
// true means long-lived, so it won't be moved.
ringbuf_alloc(&self->ringbuf, buffer_size, true);
ringbuf_alloc(&self->ringbuf, buffer_size);
bleio_characteristic_set_observer(characteristic, self);
}

View File

@ -33,6 +33,8 @@
#include "supervisor/shared/translate/translate.h"
#if MICROPY_PY_UBINASCII
static void check_not_unicode(const mp_obj_t arg) {
#if MICROPY_CPYTHON_COMPAT
if (mp_obj_is_str(arg)) {
@ -40,8 +42,6 @@ static void check_not_unicode(const mp_obj_t arg) {
}
#endif
}
#if MICROPY_PY_UBINASCII
STATIC mp_obj_t mod_binascii_hexlify(size_t n_args, const mp_obj_t *args) {
// First argument is the data to convert.
// Second argument is an optional separator to be used between values.

View File

@ -232,7 +232,7 @@ STATIC mp_obj_t file_open(fs_user_mount_t *vfs, const mp_obj_type_t *type, mp_ar
DWORD size = (temp_table[0] + 1) * 2;
// Now allocate the size and construct the map.
o->fp.cltbl = m_malloc_maybe(size * sizeof(DWORD), false);
o->fp.cltbl = m_malloc_maybe(size * sizeof(DWORD));
if (o->fp.cltbl != NULL) {
o->fp.cltbl[0] = size;
res = f_lseek(&o->fp, CREATE_LINKMAP);

View File

@ -222,14 +222,7 @@ void common_hal_busio_uart_construct(busio_uart_obj_t *self,
if (NULL != receiver_buffer) {
self->buffer = receiver_buffer;
} else {
// Initially allocate the UART's buffer in the long-lived part of the
// heap. UARTs are generally long-lived objects, but the "make long-
// lived" machinery is incapable of moving internal pointers like
// self->buffer, so do it manually. (However, as long as internal
// pointers like this are NOT moved, allocating the buffer
// in the long-lived pool is not strictly necessary)
self->buffer = (uint8_t *)gc_alloc(self->buffer_length * sizeof(uint8_t), false, true);
self->buffer = (uint8_t *)gc_alloc(self->buffer_length * sizeof(uint8_t), false);
if (self->buffer == NULL) {
common_hal_busio_uart_deinit(self);
m_malloc_fail(self->buffer_length * sizeof(uint8_t));

View File

@ -212,13 +212,7 @@ void common_hal_busio_uart_construct(busio_uart_obj_t *self,
if (receiver_buffer != NULL) {
ringbuf_init(&self->ringbuf, receiver_buffer, receiver_buffer_size);
} else {
// Initially allocate the UART's buffer in the long-lived part of the
// heap. UARTs are generally long-lived objects, but the "make long-
// lived" machinery is incapable of moving internal pointers like
// self->buffer, so do it manually. (However, as long as internal
// pointers like this are NOT moved, allocating the buffer
// in the long-lived pool is not strictly necessary)
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size, true)) {
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size)) {
m_malloc_fail(receiver_buffer_size);
}
}

View File

@ -348,7 +348,7 @@ void common_hal_busio_uart_construct(busio_uart_obj_t *self,
if (self->rx != NULL) {
if (receiver_buffer == NULL) {
self->ringbuf = gc_alloc(receiver_buffer_size, false, true /*long-lived*/);
self->ringbuf = gc_alloc(receiver_buffer_size, false);
} else {
self->ringbuf = receiver_buffer;
}

View File

@ -908,12 +908,11 @@ void common_hal_bleio_adapter_start_advertising(bleio_adapter_obj_t *self, bool
}
// The advertising data buffers must not move, because the SoftDevice depends on them.
// So make them long-lived and reuse them onwards.
if (self->advertising_data == NULL) {
self->advertising_data = (uint8_t *)gc_alloc(BLE_GAP_ADV_SET_DATA_SIZE_EXTENDED_MAX_SUPPORTED * sizeof(uint8_t), false, true);
self->advertising_data = (uint8_t *)gc_alloc(BLE_GAP_ADV_SET_DATA_SIZE_EXTENDED_MAX_SUPPORTED * sizeof(uint8_t), false);
}
if (self->scan_response_data == NULL) {
self->scan_response_data = (uint8_t *)gc_alloc(BLE_GAP_ADV_SET_DATA_SIZE_EXTENDED_MAX_SUPPORTED * sizeof(uint8_t), false, true);
self->scan_response_data = (uint8_t *)gc_alloc(BLE_GAP_ADV_SET_DATA_SIZE_EXTENDED_MAX_SUPPORTED * sizeof(uint8_t), false);
}
memcpy(self->advertising_data, advertising_data_bufinfo->buf, advertising_data_bufinfo->len);

View File

@ -45,7 +45,7 @@ extern bleio_connection_internal_t bleio_connections[BLEIO_TOTAL_CONNECTION_COUN
typedef struct {
mp_obj_base_t base;
// Pointer to buffers we maintain so that the data is long lived.
// We create buffers and copy the advertising data so it will live for as long as we need.
uint8_t *advertising_data;
uint8_t *scan_response_data;
// Pointer to current data.

View File

@ -215,13 +215,7 @@ void common_hal_busio_uart_construct(busio_uart_obj_t *self,
if (receiver_buffer != NULL) {
ringbuf_init(&self->ringbuf, receiver_buffer, receiver_buffer_size);
} else {
// Initially allocate the UART's buffer in the long-lived part of the
// heap. UARTs are generally long-lived objects, but the "make long-
// lived" machinery is incapable of moving internal pointers like
// self->buffer, so do it manually. (However, as long as internal
// pointers like this are NOT moved, allocating the buffer
// in the long-lived pool is not strictly necessary)
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size, true)) {
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size)) {
nrfx_uarte_uninit(self->uarte);
m_malloc_fail(receiver_buffer_size);
}

View File

@ -159,13 +159,7 @@ void common_hal_busio_uart_construct(busio_uart_obj_t *self,
if (receiver_buffer != NULL) {
ringbuf_init(&self->ringbuf, receiver_buffer, receiver_buffer_size);
} else {
// Initially allocate the UART's buffer in the long-lived part of the
// heap. UARTs are generally long-lived objects, but the "make long-
// lived" machinery is incapable of moving internal pointers like
// self->buffer, so do it manually. (However, as long as internal
// pointers like this are NOT moved, allocating the buffer
// in the long-lived pool is not strictly necessary)
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size, true)) {
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size)) {
uart_deinit(self->uart);
m_malloc_fail(receiver_buffer_size);
}

View File

@ -733,8 +733,7 @@ socketpool_socket_obj_t *common_hal_socketpool_socket(socketpool_socketpool_obj_
mp_raise_NotImplementedError(translate("Only IPv4 sockets supported"));
}
// we must allocate sockets long-lived because we depend on their object-identity
socketpool_socket_obj_t *socket = m_new_ll_obj_with_finaliser(socketpool_socket_obj_t);
socketpool_socket_obj_t *socket = m_new_obj_with_finaliser(socketpool_socket_obj_t);
socket->base.type = &socketpool_socket_type;
if (!socketpool_socket(self, family, type, socket)) {

View File

@ -218,13 +218,7 @@ void common_hal_busio_uart_construct(busio_uart_obj_t *self,
if (receiver_buffer != NULL) {
ringbuf_init(&self->ringbuf, receiver_buffer, receiver_buffer_size);
} else {
// Initially allocate the UART's buffer in the long-lived part of the
// heap. UARTs are generally long-lived objects, but the "make long-
// lived" machinery is incapable of moving internal pointers like
// self->buffer, so do it manually. (However, as long as internal
// pointers like this are NOT moved, allocating the buffer
// in the long-lived pool is not strictly necessary)
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size, true)) {
if (!ringbuf_alloc(&self->ringbuf, receiver_buffer_size)) {
m_malloc_fail(receiver_buffer_size);
}
}

View File

@ -31,7 +31,6 @@
#include <assert.h>
#include "py/compile.h"
#include "py/gc_long_lived.h"
#include "py/gc.h"
#include "py/objmodule.h"
#include "py/persistentcode.h"
@ -159,7 +158,6 @@ STATIC void do_load_from_lexer(mp_module_context_t *context, mp_lexer_t *lex) {
// parse, compile and execute the module in its context
mp_obj_dict_t *mod_globals = context->module.globals;
mp_parse_compile_execute(lex, MP_PARSE_FILE_INPUT, mod_globals, mod_globals);
mp_obj_module_set_globals(module_obj, make_dict_long_lived(mod_globals, 10));
}
#endif
@ -189,8 +187,6 @@ STATIC void do_execute_raw_code(mp_module_context_t *context, const mp_raw_code_
// finish nlr block, restore context
nlr_pop();
mp_obj_module_set_globals(module_obj,
make_dict_long_lived(mp_obj_module_get_globals(module_obj), 10));
mp_globals_set(old_globals);
mp_locals_set(old_locals);
} else {

229
py/gc.c
View File

@ -62,6 +62,9 @@
// detect untraced object still in use
#define CLEAR_ON_SWEEP (0)
#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / MP_BYTES_PER_OBJ_WORD)
#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)
// ATB = allocation table byte
// 0b00 = FREE -- free block
// 0b01 = HEAD -- head of a chain of blocks
@ -74,6 +77,15 @@
#define AT_MARK (3)
#define BLOCKS_PER_ATB (4)
#define ATB_MASK_0 (0x03)
#define ATB_MASK_1 (0x0c)
#define ATB_MASK_2 (0x30)
#define ATB_MASK_3 (0xc0)
#define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
#define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
#define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)
#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
#define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
@ -130,6 +142,7 @@ void gc_init(void *start, void *end) {
// => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
size_t total_byte_len = (byte *)end - (byte *)start;
#if MICROPY_ENABLE_FINALISER
// CIRCUITPY: https://github.com/adafruit/circuitpython/pull/5245 (bug fix)
MP_STATE_MEM(gc_alloc_table_byte_len) = (total_byte_len - 1) * MP_BITS_PER_BYTE / (MP_BITS_PER_BYTE + MP_BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + MP_BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
#else
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + MP_BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
@ -138,7 +151,8 @@ void gc_init(void *start, void *end) {
MP_STATE_MEM(gc_alloc_table_start) = (byte *)start;
#if MICROPY_ENABLE_FINALISER
MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len) + 1;
size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
#endif
size_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
@ -146,34 +160,27 @@ void gc_init(void *start, void *end) {
MP_STATE_MEM(gc_pool_end) = end;
#if MICROPY_ENABLE_FINALISER
size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
// CIRCUITPY: https://github.com/adafruit/circuitpython/pull/6397 (compiler diagnostic)
(void)gc_finaliser_table_byte_len; // avoid unused variable diagnostic if asserts are disabled
assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
#endif
// Clear ATBs & finalisers (if enabled). This also clears the extra byte
// which appears between ATBs and finalisers that ensures every chain in
// the ATB terminates, rather than erroneously using bits from the
// finalisers.
memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_pool_start) - MP_STATE_MEM(gc_alloc_table_start));
// clear ATBs
memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));
// Set first free ATB index to the start of the heap.
for (size_t i = 0; i < MICROPY_ATB_INDICES; i++) {
MP_STATE_MEM(gc_first_free_atb_index)[i] = 0;
}
#if MICROPY_ENABLE_FINALISER
// clear FTBs
memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
#endif
// Set last free ATB index to the end of the heap.
MP_STATE_MEM(gc_last_free_atb_index) = MP_STATE_MEM(gc_alloc_table_byte_len) - 1;
// Set the lowest long lived ptr to the end of the heap to start. This will be lowered as long
// lived objects are allocated.
MP_STATE_MEM(gc_lowest_long_lived_ptr) = (void *)PTR_FROM_BLOCK(MP_STATE_MEM(gc_alloc_table_byte_len * BLOCKS_PER_ATB));
// set last free ATB index to start of heap
MP_STATE_MEM(gc_last_free_atb_index) = 0;
// unlock the GC
MP_STATE_THREAD(gc_lock_depth) = 0;
// allow auto collection
MP_STATE_MEM(gc_auto_collect_enabled) = true;
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
#if MICROPY_GC_ALLOC_THRESHOLD
// by default, maxuint for gc threshold, effectively turning gc-by-threshold off
@ -219,6 +226,8 @@ bool gc_is_locked(void) {
return MP_STATE_THREAD(gc_lock_depth) != 0;
}
// CIRCUITPY: VERIFY_PTR moved to gc.h to make it available elsewhere.
#ifndef TRACE_MARK
#if DEBUG_PRINT
#define TRACE_MARK(block, ptr) DEBUG_printf("gc_mark(%p)\n", ptr)
@ -231,7 +240,7 @@ bool gc_is_locked(void) {
// children: mark the unmarked child blocks and put those newly marked
// blocks on the stack. When all children have been checked, pop off the
// topmost block on the stack and repeat with that one.
// We don't instrument these functions because they occur a lot during GC and
// CIRCUITPY: We don't instrument these functions because they occur a lot during GC and
// fill up the output buffer quickly.
STATIC void MP_NO_INSTRUMENT PLACE_IN_ITCM(gc_mark_subtree)(size_t block) {
// Start with the block passed in the argument.
@ -412,7 +421,7 @@ void gc_collect_root(void **ptrs, size_t len) {
for (size_t i = 0; i < len; i++) {
MICROPY_GC_HOOK_LOOP
void *ptr = gc_get_ptr(ptrs, i);
// CIRCUITPY changed i PR #1816
// CIRCUITPY changed in PR #1816
gc_mark(ptr);
}
}
@ -420,10 +429,7 @@ void gc_collect_root(void **ptrs, size_t len) {
void gc_collect_end(void) {
gc_deal_with_stack_overflow();
gc_sweep();
for (size_t i = 0; i < MICROPY_ATB_INDICES; i++) {
MP_STATE_MEM(gc_first_free_atb_index)[i] = 0;
}
MP_STATE_MEM(gc_last_free_atb_index) = MP_STATE_MEM(gc_alloc_table_byte_len) - 1;
MP_STATE_MEM(gc_last_free_atb_index) = 0;
MP_STATE_THREAD(gc_lock_depth)--;
GC_EXIT();
}
@ -503,9 +509,7 @@ bool gc_alloc_possible(void) {
return MP_STATE_MEM(gc_pool_start) != 0;
}
// We place long lived objects at the end of the heap rather than the start. This reduces
// fragmentation by localizing the heap churn to one portion of memory (the start of the heap.)
void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived) {
void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
bool has_finaliser = alloc_flags & GC_ALLOC_FLAG_HAS_FINALISER;
size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
@ -526,11 +530,11 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived) {
GC_ENTER();
size_t found_block = 0xffffffff;
size_t i;
size_t end_block;
size_t start_block;
size_t n_free;
bool collected = !MP_STATE_MEM(gc_auto_collect_enabled);
int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
#if MICROPY_GC_ALLOC_THRESHOLD
if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
@ -541,49 +545,18 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived) {
}
#endif
bool keep_looking = true;
for (;;) {
// When we start searching on the other side of the crossover block we make sure to
// perform a collect. That way we'll get the closest free block in our section.
size_t crossover_block = BLOCK_FROM_PTR(MP_STATE_MEM(gc_lowest_long_lived_ptr));
while (keep_looking) {
int8_t direction = 1;
size_t bucket = MIN(n_blocks, MICROPY_ATB_INDICES) - 1;
size_t first_free = MP_STATE_MEM(gc_first_free_atb_index)[bucket];
size_t start = first_free;
if (long_lived) {
direction = -1;
start = MP_STATE_MEM(gc_last_free_atb_index);
}
n_free = 0;
// look for a run of n_blocks available blocks
for (size_t i = start; keep_looking && first_free <= i && i <= MP_STATE_MEM(gc_last_free_atb_index); i += direction) {
byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
// Four ATB states are packed into a single byte.
int j = 0;
if (direction == -1) {
j = 3;
}
for (; keep_looking && 0 <= j && j <= 3; j += direction) {
if ((a & (0x3 << (j * 2))) == 0) {
if (++n_free >= n_blocks) {
found_block = i * BLOCKS_PER_ATB + j;
keep_looking = false;
}
} else {
if (!collected) {
size_t block = i * BLOCKS_PER_ATB + j;
if ((direction == 1 && block >= crossover_block) ||
(direction == -1 && block < crossover_block)) {
keep_looking = false;
}
}
n_free = 0;
}
}
}
if (n_free >= n_blocks) {
break;
for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
// *FORMAT-OFF*
if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
// *FORMAT-ON*
}
GC_EXIT();
@ -593,35 +566,26 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived) {
}
DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
gc_collect();
collected = true;
// Try again since we've hopefully freed up space.
keep_looking = true;
collected = 1;
GC_ENTER();
}
assert(found_block != 0xffffffff);
// Found free space ending at found_block inclusive.
// Also, set last free ATB index to block after last block we found, for start of
// next scan. Also, whenever we free or shrink a block we must check if this index needs
// adjusting (see gc_realloc and gc_free).
if (!long_lived) {
end_block = found_block;
start_block = found_block - n_free + 1;
if (n_blocks < MICROPY_ATB_INDICES) {
size_t next_free_atb = (found_block + n_blocks) / BLOCKS_PER_ATB;
// Update all atb indices for larger blocks too.
for (size_t i = n_blocks - 1; i < MICROPY_ATB_INDICES; i++) {
MP_STATE_MEM(gc_first_free_atb_index)[i] = next_free_atb;
}
}
} else {
start_block = found_block;
end_block = found_block + n_free - 1;
// Always update the bounds of the long lived area because we assume it is contiguous. (It
// can still be reset by a sweep.)
MP_STATE_MEM(gc_last_free_atb_index) = (found_block - 1) / BLOCKS_PER_ATB;
// found, ending at block i inclusive
found:
// get starting and end blocks, both inclusive
end_block = i;
start_block = i - n_free + 1;
// Set last free ATB index to block after last block we found, for start of
// next scan. To reduce fragmentation, we only do this if we were looking
// for a single free block, which guarantees that there are no free blocks
// before this one. Also, whenever we free or shink a block we must check
// if this index needs adjusting (see gc_realloc and gc_free).
if (n_free == 1) {
MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
}
#ifdef LOG_HEAP_ACTIVITY
gc_log_change(start_block, end_block - start_block + 1);
#endif
@ -640,13 +604,6 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived) {
void *ret_ptr = (void *)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
// If the allocation was long live then update the lowest value. Its used to trigger early
// collects when allocations fail in their respective section. Its also used to ignore calls to
// gc_make_long_lived where the pointer is already in the long lived section.
if (long_lived && ret_ptr < MP_STATE_MEM(gc_lowest_long_lived_ptr)) {
MP_STATE_MEM(gc_lowest_long_lived_ptr) = ret_ptr;
}
#if MICROPY_GC_ALLOC_THRESHOLD
MP_STATE_MEM(gc_alloc_amount) += n_blocks;
#endif
@ -714,42 +671,34 @@ void gc_free(void *ptr) {
if (ptr == NULL) {
GC_EXIT();
} else {
// CIRCUITPY extra checking
if (MP_STATE_MEM(gc_pool_start) == 0) {
reset_into_safe_mode(SAFE_MODE_GC_ALLOC_OUTSIDE_VM);
}
// get the GC block number corresponding to this pointer
assert(VERIFY_PTR(ptr));
size_t start_block = BLOCK_FROM_PTR(ptr);
assert(ATB_GET_KIND(start_block) == AT_HEAD);
size_t block = BLOCK_FROM_PTR(ptr);
assert(ATB_GET_KIND(block) == AT_HEAD);
#if MICROPY_ENABLE_FINALISER
FTB_CLEAR(start_block);
FTB_CLEAR(block);
#endif
// free head and all of its tail blocks
// set the last_free pointer to this block if it's earlier in the heap
if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
}
#ifdef LOG_HEAP_ACTIVITY
gc_log_change(start_block, 0);
#endif
size_t block = start_block;
// free head and all of its tail blocks
do {
ATB_ANY_TO_FREE(block);
block += 1;
} while (ATB_GET_KIND(block) == AT_TAIL);
// Update the first free pointer for our size only. Not much calls gc_free directly so there
// is decent chance we'll want to allocate this size again. By only updating the specific
// size we don't risk something smaller fitting in.
size_t n_blocks = block - start_block;
size_t bucket = MIN(n_blocks, MICROPY_ATB_INDICES) - 1;
size_t new_free_atb = start_block / BLOCKS_PER_ATB;
if (new_free_atb < MP_STATE_MEM(gc_first_free_atb_index)[bucket]) {
MP_STATE_MEM(gc_first_free_atb_index)[bucket] = new_free_atb;
}
// set the last_free pointer to this block if it's earlier in the heap
if (new_free_atb > MP_STATE_MEM(gc_last_free_atb_index)) {
MP_STATE_MEM(gc_last_free_atb_index) = new_free_atb;
}
GC_EXIT();
#if EXTENSIVE_HEAP_PROFILING
@ -795,33 +744,6 @@ bool gc_has_finaliser(const void *ptr) {
return false;
}
void *gc_make_long_lived(void *old_ptr) {
// If its already in the long lived section then don't bother moving it.
if (old_ptr >= MP_STATE_MEM(gc_lowest_long_lived_ptr)) {
return old_ptr;
}
size_t n_bytes = gc_nbytes(old_ptr);
if (n_bytes == 0) {
return old_ptr;
}
bool has_finaliser = gc_has_finaliser(old_ptr);
// Try and find a new area in the long lived section to copy the memory to.
void *new_ptr = gc_alloc(n_bytes, has_finaliser, true);
if (new_ptr == NULL) {
return old_ptr;
} else if (old_ptr > new_ptr) {
// Return the old pointer if the new one is lower in the heap and free the new space.
gc_free(new_ptr);
return old_ptr;
}
// We copy everything over and let the garbage collection process delete the old copy. That way
// we ensure we don't delete memory that has a second reference. (Though if there is we may
// confuse things when its mutable.)
memcpy(new_ptr, old_ptr, n_bytes);
return new_ptr;
}
#if 0
// old, simple realloc that didn't expand memory in place
void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
@ -854,7 +776,7 @@ void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
// check for pure allocation
if (ptr_in == NULL) {
return gc_alloc(n_bytes, false, false);
return gc_alloc(n_bytes, false);
}
// check for pure free
@ -919,13 +841,8 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
}
// set the last_free pointer to end of this block if it's earlier in the heap
size_t new_free_atb = (block + new_blocks) / BLOCKS_PER_ATB;
size_t bucket = MIN(n_blocks - new_blocks, MICROPY_ATB_INDICES) - 1;
if (new_free_atb < MP_STATE_MEM(gc_first_free_atb_index)[bucket]) {
MP_STATE_MEM(gc_first_free_atb_index)[bucket] = new_free_atb;
}
if (new_free_atb > MP_STATE_MEM(gc_last_free_atb_index)) {
MP_STATE_MEM(gc_last_free_atb_index) = new_free_atb;
if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
}
GC_EXIT();
@ -992,7 +909,7 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
}
// can't resize inplace; try to find a new contiguous chain
void *ptr_out = gc_alloc(n_bytes, ftb_state, false);
void *ptr_out = gc_alloc(n_bytes, ftb_state);
// check that the alloc succeeded
if (ptr_out == NULL) {
@ -1025,7 +942,7 @@ bool gc_never_free(void *ptr) {
last_reference_block = current_reference_block; // keep a record of last "proper" reference block
current_reference_block = current_reference_block[0];
}
void **next_block = gc_alloc(BYTES_PER_BLOCK, false, true);
void **next_block = gc_alloc(BYTES_PER_BLOCK, false);
if (next_block == NULL) {
return false;
}

11
py/gc.h
View File

@ -26,15 +26,13 @@
#ifndef MICROPY_INCLUDED_PY_GC_H
#define MICROPY_INCLUDED_PY_GC_H
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include "py/mpconfig.h"
#include "py/mpstate.h"
#include "py/misc.h"
#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / MP_BYTES_PER_OBJ_WORD)
#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)
#define HEAP_PTR(ptr) ( \
MP_STATE_MEM(gc_pool_start) != 0 /* Not on the heap if it isn't inited */ \
&& ptr >= (void *)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
@ -43,7 +41,7 @@
// ptr should be of type void*
#define VERIFY_PTR(ptr) ( \
((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
((uintptr_t)(ptr) & (MICROPY_BYTES_PER_GC_BLOCK - 1)) == 0 /* must be aligned on a block */ \
&& HEAP_PTR(ptr) \
)
@ -73,11 +71,10 @@ enum {
GC_ALLOC_FLAG_HAS_FINALISER = 1,
};
void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived);
void *gc_alloc(size_t n_bytes, unsigned int alloc_flags);
void gc_free(void *ptr); // does not call finaliser
size_t gc_nbytes(const void *ptr);
bool gc_has_finaliser(const void *ptr);
void *gc_make_long_lived(void *old_ptr);
void *gc_realloc(void *ptr, size_t n_bytes, bool allow_move);
// Prevents a pointer from ever being freed because it establishes a permanent reference to it. Use

View File

@ -1,149 +0,0 @@
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/emitglue.h"
#include "py/gc_long_lived.h"
#include "py/gc.h"
#include "py/mpstate.h"
mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth) {
#ifndef MICROPY_ENABLE_GC
return fun_bc;
#endif
if (fun_bc == NULL || MP_OBJ_FROM_PTR(fun_bc) == mp_const_none || max_depth == 0) {
return fun_bc;
}
fun_bc->bytecode = gc_make_long_lived((byte *)fun_bc->bytecode);
fun_bc->context->module.globals = make_dict_long_lived(fun_bc->context->module.globals, max_depth - 1);
// for (uint32_t i = 0; i < gc_nbytes(fun_bc->const_table) / sizeof(mp_obj_t); i++) {
// // Skip things that aren't allocated on the heap (and hence have zero bytes.)
// if (gc_nbytes(MP_OBJ_TO_PTR(fun_bc->const_table[i])) == 0) {
// continue;
// }
// // Try to detect raw code.
// mp_raw_code_t *raw_code = MP_OBJ_TO_PTR(fun_bc->const_table[i]);
// if (raw_code->kind == MP_CODE_BYTECODE) {
// raw_code->fun_data = gc_make_long_lived((byte *)raw_code->fun_data);
// raw_code->const_table = gc_make_long_lived((byte *)raw_code->const_table);
// }
// ((mp_uint_t *)fun_bc->const_table)[i] = (mp_uint_t)make_obj_long_lived(
// (mp_obj_t)fun_bc->const_table[i], max_depth - 1);
// }
// fun_bc->const_table = gc_make_long_lived((mp_uint_t *)fun_bc->const_table);
// // extra_args stores keyword only argument default values.
// size_t words = gc_nbytes(fun_bc) / sizeof(mp_uint_t *);
// // Functions (mp_obj_fun_bc_t) have four pointers (base, globals, bytecode and const_table)
// // before the variable length extra_args so remove them from the length.
// for (size_t i = 0; i < words - 4; i++) {
// if (MP_OBJ_TO_PTR(fun_bc->extra_args[i]) == NULL) {
// continue;
// }
// if (mp_obj_is_type(fun_bc->extra_args[i], &mp_type_dict)) {
// fun_bc->extra_args[i] = MP_OBJ_FROM_PTR(make_dict_long_lived(MP_OBJ_TO_PTR(fun_bc->extra_args[i]), max_depth - 1));
// } else {
// fun_bc->extra_args[i] = make_obj_long_lived(fun_bc->extra_args[i], max_depth - 1);
// }
// }
return gc_make_long_lived(fun_bc);
}
// Move a property object and its three proxy objects into the long-lived region,
// recursing up to max_depth levels. Returns the (possibly relocated) property
// pointer. NOTE(review): proxy[0..2] appear to be the property's accessor
// callables (getter/setter/deleter) — confirm against py/objproperty.h.
mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth) {
    // Without a GC there is no long-lived region; the object cannot move.
    #ifndef MICROPY_ENABLE_GC
    return prop;
    #endif
    // Recursion budget exhausted: leave the property where it is.
    if (max_depth == 0) {
        return prop;
    }
    // Relocate children first so the parent copy captures the new pointers.
    prop->proxy[0] = make_obj_long_lived(prop->proxy[0], max_depth - 1);
    prop->proxy[1] = make_obj_long_lived(prop->proxy[1], max_depth - 1);
    prop->proxy[2] = make_obj_long_lived(prop->proxy[2], max_depth - 1);
    return gc_make_long_lived(prop);
}
// Move a dict, its backing table, and all of its values into the long-lived
// region, recursing up to max_depth levels. Keys are not relocated. Returns the
// (possibly relocated) dict pointer. Uses map.scanning as a visited flag to
// break cycles, so re-entrant references to the same dict return immediately.
mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth) {
    // Without a GC there is no long-lived region; the object cannot move.
    #ifndef MICROPY_ENABLE_GC
    return dict;
    #endif
    // Skip NULL, exhausted recursion budget, the main module dict (must stay
    // put), and fixed (ROM) maps which cannot be moved or mutated.
    if (dict == NULL || max_depth == 0 || dict == &MP_STATE_VM(dict_main) || dict->map.is_fixed) {
        return dict;
    }
    // Don't recurse unnecessarily. Return immediately if we've already seen this dict.
    if (dict->map.scanning) {
        return dict;
    }
    // Mark that we're processing this dict.
    dict->map.scanning = 1;
    // Update all of the references first so that we reduce the chance of references to the old
    // copies.
    dict->map.table = gc_make_long_lived(dict->map.table);
    for (size_t i = 0; i < dict->map.alloc; i++) {
        if (mp_map_slot_is_filled(&dict->map, i)) {
            mp_obj_t value = dict->map.table[i].value;
            dict->map.table[i].value = make_obj_long_lived(value, max_depth - 1);
        }
    }
    // Move the dict itself last, after its contents are settled.
    dict = gc_make_long_lived(dict);
    // Done recursing through this dict.
    dict->map.scanning = 0;
    return dict;
}
// Move a str/bytes object and its character data into the long-lived region.
// Returns the (possibly relocated) string pointer. Data is moved before the
// header so the copied header carries the new data pointer.
mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str) {
    str->data = gc_make_long_lived((byte *)str->data);
    return gc_make_long_lived(str);
}
// Generic dispatcher: move an arbitrary object into the long-lived region,
// recursing up to max_depth levels via the type-specific helpers above.
// Non-heap pointers (e.g. frozen/ROM objects) and NULL are returned unchanged.
mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth) {
    // Without a GC there is no long-lived region; the object cannot move.
    #ifndef MICROPY_ENABLE_GC
    return obj;
    #endif
    if (MP_OBJ_TO_PTR(obj) == NULL) {
        return obj;
    }
    // If not in the GC pool, do nothing. This can happen (at least) when
    // there are frozen mp_type_bytes objects in ROM.
    if (!VERIFY_PTR((void *)obj)) {
        return obj;
    }
    if (mp_obj_is_type(obj, &mp_type_fun_bc)) {
        mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(obj);
        return MP_OBJ_FROM_PTR(make_fun_bc_long_lived(fun_bc, max_depth));
    } else if (mp_obj_is_type(obj, &mp_type_property)) {
        mp_obj_property_t *prop = MP_OBJ_TO_PTR(obj);
        return MP_OBJ_FROM_PTR(make_property_long_lived(prop, max_depth));
    } else if (mp_obj_is_type(obj, &mp_type_str) || mp_obj_is_type(obj, &mp_type_bytes)) {
        mp_obj_str_t *str = MP_OBJ_TO_PTR(obj);
        return MP_OBJ_FROM_PTR(make_str_long_lived(str));
    } else if (mp_obj_is_type(obj, &mp_type_type)) {
        // Types are already long lived during creation.
        return obj;
    } else {
        // Fallback: shallow move — the object's internal pointers are not updated.
        return MP_OBJ_FROM_PTR(gc_make_long_lived(MP_OBJ_TO_PTR(obj)));
    }
}

View File

@ -1,43 +0,0 @@
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
// These helpers move MicroPython objects and their sub-objects to the long lived portion of the
// heap.
#ifndef MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
#define MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
#include "py/objfun.h"
#include "py/objproperty.h"
#include "py/objstr.h"
mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth);
mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth);
mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth);
mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str);
mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth);
#endif // MICROPY_INCLUDED_PY_GC_LONG_LIVED_H

View File

@ -56,8 +56,8 @@
#undef malloc
#undef free
#undef realloc
#define malloc_ll(b, ll) gc_alloc((b), false, (ll))
#define malloc_with_finaliser(b, ll) gc_alloc((b), true, (ll))
#define malloc(b) gc_alloc((b), false)
#define malloc_with_finaliser(b) gc_alloc((b), true)
#define free gc_free
#define realloc(ptr, n) gc_realloc(ptr, n, true)
#define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv)
@ -69,9 +69,6 @@
#error MICROPY_ENABLE_FINALISER requires MICROPY_ENABLE_GC
#endif
#define malloc_ll(b, ll) malloc(b)
#define malloc_with_finaliser(b) malloc((b))
STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
if (allow_move) {
return realloc(ptr, n_bytes);
@ -85,8 +82,8 @@ STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
#endif // MICROPY_ENABLE_GC
void *m_malloc(size_t num_bytes, bool long_lived) {
void *ptr = malloc_ll(num_bytes, long_lived);
void *m_malloc(size_t num_bytes) {
void *ptr = malloc(num_bytes);
if (ptr == NULL && num_bytes != 0) {
m_malloc_fail(num_bytes);
}
@ -99,8 +96,8 @@ void *m_malloc(size_t num_bytes, bool long_lived) {
return ptr;
}
void *m_malloc_maybe(size_t num_bytes, bool long_lived) {
void *ptr = malloc_ll(num_bytes, long_lived);
void *m_malloc_maybe(size_t num_bytes) {
void *ptr = malloc(num_bytes);
#if MICROPY_MEM_STATS
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
@ -111,8 +108,8 @@ void *m_malloc_maybe(size_t num_bytes, bool long_lived) {
}
#if MICROPY_ENABLE_FINALISER
void *m_malloc_with_finaliser(size_t num_bytes, bool long_lived) {
void *ptr = malloc_with_finaliser(num_bytes, long_lived);
void *m_malloc_with_finaliser(size_t num_bytes) {
void *ptr = malloc_with_finaliser(num_bytes);
if (ptr == NULL && num_bytes != 0) {
m_malloc_fail(num_bytes);
}
@ -126,8 +123,8 @@ void *m_malloc_with_finaliser(size_t num_bytes, bool long_lived) {
}
#endif
void *m_malloc0(size_t num_bytes, bool long_lived) {
void *ptr = m_malloc(num_bytes, long_lived);
void *m_malloc0(size_t num_bytes) {
void *ptr = m_malloc(num_bytes);
// If this config is set then the GC clears all memory, so we don't need to.
#if !MICROPY_GC_CONSERVATIVE_CLEAR
memset(ptr, 0, num_bytes);
@ -136,10 +133,11 @@ void *m_malloc0(size_t num_bytes, bool long_lived) {
}
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes)
#else
void *m_realloc(void *ptr, size_t new_num_bytes) {
#endif
void *m_realloc(void *ptr, size_t new_num_bytes)
#endif
{
void *new_ptr = realloc(ptr, new_num_bytes);
if (new_ptr == NULL && new_num_bytes != 0) {
m_malloc_fail(new_num_bytes);
@ -164,10 +162,11 @@ void *m_realloc(void *ptr, size_t new_num_bytes) {
}
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move) {
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move)
#else
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
#endif
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move)
#endif
{
void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
#if MICROPY_MEM_STATS
// At first thought, "Total bytes allocated" should only grow,

View File

@ -63,26 +63,19 @@ typedef unsigned int uint;
// TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element)
#define m_new(type, num) ((type *)(m_malloc(sizeof(type) * (num), false)))
#define m_new_ll(type, num) ((type *)(m_malloc(sizeof(type) * (num), true)))
#define m_new_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num), false)))
#define m_new_ll_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num), true)))
#define m_new0(type, num) ((type *)(m_malloc0(sizeof(type) * (num), false)))
#define m_new0_ll(type, num) ((type *)(m_malloc0(sizeof(type) * (num), true)))
#define m_new(type, num) ((type *)(m_malloc(sizeof(type) * (num))))
#define m_new_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num))))
#define m_new0(type, num) ((type *)(m_malloc0(sizeof(type) * (num))))
#define m_new_obj(type) (m_new(type, 1))
#define m_new_ll_obj(type) (m_new_ll(type, 1))
#define m_new_obj_maybe(type) (m_new_maybe(type, 1))
#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type *)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num), false))
#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type *)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num), false))
#define m_new_ll_obj_var_maybe(obj_type, var_type, var_num) ((obj_type *)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num), true))
#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type *)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num)))
#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type *)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num)))
#if MICROPY_ENABLE_FINALISER
#define m_new_obj_with_finaliser(type) ((type *)(m_malloc_with_finaliser(sizeof(type), false)))
#define m_new_obj_var_with_finaliser(type, var_type, var_num) ((type *)m_malloc_with_finaliser(sizeof(type) + sizeof(var_type) * (var_num), false))
#define m_new_ll_obj_with_finaliser(type) ((type *)(m_malloc_with_finaliser(sizeof(type), true)))
#define m_new_obj_with_finaliser(type) ((type *)(m_malloc_with_finaliser(sizeof(type))))
#define m_new_obj_var_with_finaliser(type, var_type, var_num) ((type *)m_malloc_with_finaliser(sizeof(type) + sizeof(var_type) * (var_num)))
#else
#define m_new_obj_with_finaliser(type) m_new_obj(type)
#define m_new_obj_var_with_finaliser(type, var_type, var_num) m_new_obj_var(type, var_type, var_num)
#define m_new_ll_obj_with_finaliser(type) m_new_ll_obj(type)
#endif
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num))))
@ -97,10 +90,10 @@ typedef unsigned int uint;
#endif
#define m_del_obj(type, ptr) (m_del(type, ptr, 1))
void *m_malloc(size_t num_bytes, bool long_lived);
void *m_malloc_maybe(size_t num_bytes, bool long_lived);
void *m_malloc_with_finaliser(size_t num_bytes, bool long_lived);
void *m_malloc0(size_t num_bytes, bool long_lived);
void *m_malloc(size_t num_bytes);
void *m_malloc_maybe(size_t num_bytes);
void *m_malloc_with_finaliser(size_t num_bytes);
void *m_malloc0(size_t num_bytes);
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes);
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move);
@ -279,4 +272,66 @@ typedef union _mp_float_union_t {
#endif // MICROPY_PY_BUILTINS_FLOAT
/** ROM string compression *************/
#if MICROPY_ROM_TEXT_COMPRESSION
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
#error "MICROPY_ERROR_REPORTING_NONE requires MICROPY_ROM_TEXT_COMPRESSION disabled"
#endif
#ifdef NO_QSTR
// Compression enabled but doing QSTR extraction.
// So leave MP_COMPRESSED_ROM_TEXT in place for makeqstrdefs.py / makecompresseddata.py to find them.
#else
// Compression enabled and doing a regular build.
// Map MP_COMPRESSED_ROM_TEXT to the compressed strings.
// Force usage of the MP_ERROR_TEXT macro by requiring an opaque type.
typedef struct {
#ifdef __clang__
// Fix "error: empty struct has size 0 in C, size 1 in C++".
char dummy;
#endif
} *mp_rom_error_text_t;
#include <string.h>
inline __attribute__((always_inline)) const char *MP_COMPRESSED_ROM_TEXT(const char *msg) {
// "genhdr/compressed.data.h" contains an invocation of the MP_MATCH_COMPRESSED macro for each compressed string.
// The giant if(strcmp) tree is optimized by the compiler, which turns this into a direct return of the compressed data.
#define MP_MATCH_COMPRESSED(a, b) if (strcmp(msg, a) == 0) { return b; } else
// It also contains a single invocation of the MP_COMPRESSED_DATA macro, we don't need that here.
#define MP_COMPRESSED_DATA(x)
#include "genhdr/compressed.data.h"
#undef MP_COMPRESSED_DATA
#undef MP_MATCH_COMPRESSED
return msg;
}
#endif
#else
// Compression not enabled, just make it a no-op.
typedef const char *mp_rom_error_text_t;
#define MP_COMPRESSED_ROM_TEXT(x) x
#endif // MICROPY_ROM_TEXT_COMPRESSION
// Might add more types of compressed text in the future.
// For now, forward directly to MP_COMPRESSED_ROM_TEXT.
// CIRCUITPY: MP_ERROR_TEXT() -> translate()
#if !CIRCUITPY
#define MP_ERROR_TEXT(x) (mp_rom_error_text_t)MP_COMPRESSED_ROM_TEXT(x)
#endif
#endif // MICROPY_INCLUDED_PY_MISC_H

View File

@ -26,7 +26,7 @@
#ifndef MICROPY_INCLUDED_PY_MPCONFIG_H
#define MICROPY_INCLUDED_PY_MPCONFIG_H
// In CircuitPython, this is defined in genghdr/mpversion.h
// In CircuitPython, this is defined in genhdr/mpversion.h
#if !CIRCUITPY
// Current version of MicroPython
#define MICROPY_VERSION_MAJOR 1
@ -201,12 +201,6 @@
#define MICROPY_ALLOC_QSTR_CHUNK_INIT (128)
#endif
// Max number of entries in newly allocated QSTR pools. Smaller numbers may make QSTR lookups
// slightly slower but reduce the waste of unused spots.
#ifndef MICROPY_QSTR_POOL_MAX_ENTRIES
#define MICROPY_QSTR_POOL_MAX_ENTRIES (64)
#endif
// Initial amount for lexer indentation level
#ifndef MICROPY_ALLOC_LEXER_INDENT_INIT
#define MICROPY_ALLOC_LEXER_INDENT_INIT (10)
@ -319,14 +313,6 @@
#define alloca(x) m_malloc(x)
#endif
// Number of atb indices to cache. Allocations of fewer blocks will be faster
// because the search will be accelerated by the index cache. This only applies
// to short lived allocations because we assume the long lived allocations are
// contiguous.
#ifndef MICROPY_ATB_INDICES
#define MICROPY_ATB_INDICES (8)
#endif
/*****************************************************************************/
/* MicroPython emitters */
@ -384,11 +370,6 @@
#define MICROPY_EMIT_INLINE_THUMB (0)
#endif
// Whether to enable ARMv7-M instruction support in the Thumb2 inline assembler
#ifndef MICROPY_EMIT_INLINE_THUMB_ARMV7M
#define MICROPY_EMIT_INLINE_THUMB_ARMV7M (1)
#endif
// Whether to enable float support in the Thumb2 inline assembler
#ifndef MICROPY_EMIT_INLINE_THUMB_FLOAT
#define MICROPY_EMIT_INLINE_THUMB_FLOAT (1)
@ -807,6 +788,7 @@ typedef long long mp_longint_impl_t;
#define MICROPY_CPYTHON_EXCEPTION_CHAIN (0)
#endif
// CIRCUITPY
// Whether the statically allocated GeneratorExit exception may be const
#ifndef MICROPY_CONST_GENERATOREXIT_OBJ
#define MICROPY_CONST_GENERATOREXIT_OBJ (!MICROPY_CPYTHON_EXCEPTION_CHAIN)

View File

@ -87,12 +87,10 @@ typedef struct _mp_state_mem_t {
byte *gc_pool_start;
byte *gc_pool_end;
void *gc_lowest_long_lived_ptr;
int gc_stack_overflow;
MICROPY_GC_STACK_ENTRY_TYPE gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
// This variable controls auto garbage collection. If set to false then the
// This variable controls auto garbage collection. If set to 0 then the
// GC won't automatically run when gc_alloc can't find enough blocks. But
// you can still allocate/free memory and also explicitly call gc_collect.
uint16_t gc_auto_collect_enabled;
@ -102,7 +100,6 @@ typedef struct _mp_state_mem_t {
size_t gc_alloc_threshold;
#endif
size_t gc_first_free_atb_index[MICROPY_ATB_INDICES];
size_t gc_last_free_atb_index;
#if MICROPY_PY_GC_COLLECT_RETVAL

View File

@ -45,7 +45,7 @@
// Allocates an object and also sets type, for mp_obj_malloc{,_var} macros.
void *mp_obj_malloc_helper(size_t num_bytes, const mp_obj_type_t *type) {
mp_obj_base_t *base = (mp_obj_base_t *)m_malloc(num_bytes, false);
mp_obj_base_t *base = (mp_obj_base_t *)m_malloc(num_bytes);
base->type = type;
return base;
}

View File

@ -439,7 +439,6 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t;
// Declare a module as a builtin, processed by makemoduledefs.py
// param module_name: MP_QSTR_<module name>
// param obj_module: mp_obj_module_t instance
// param enabled_define: used as `#if (enabled_define) around entry`
#ifndef NO_QSTR
#define MP_REGISTER_MODULE(module_name, obj_module)
@ -461,9 +460,7 @@ typedef struct _mp_map_t {
size_t all_keys_are_qstrs : 1;
size_t is_fixed : 1; // if set, table is fixed/read-only and can't be modified
size_t is_ordered : 1; // if set, table is an ordered array, not a hash map
size_t scanning : 1; // true if we're in the middle of scanning linked dictionaries,
// e.g., make_dict_long_lived()
size_t used : (8 * sizeof(size_t) - 4);
size_t used : (8 * sizeof(size_t) - 3);
size_t alloc;
mp_map_elem_t *table;
} mp_map_t;
@ -1098,7 +1095,6 @@ typedef struct _mp_obj_fun_builtin_var_t {
} mp_obj_fun_builtin_var_t;
qstr mp_obj_fun_get_name(mp_const_obj_t fun);
qstr mp_obj_code_get_name(const byte *code_info);
mp_obj_t mp_identity(mp_obj_t self);
MP_DECLARE_CONST_FUN_OBJ_1(mp_identity_obj);

View File

@ -31,8 +31,7 @@
typedef struct _mp_obj_fun_bc_t {
mp_obj_base_t base;
// CIRCUITPY - Long-lived conversions need to write into context, so un-const the field.
/*const*/ mp_module_context_t *context; // context within which this function was defined
const mp_module_context_t *context; // context within which this function was defined
struct _mp_raw_code_t *const *child_table; // table of children
const byte *bytecode; // bytecode for the function
#if MICROPY_PY_SYS_SETTRACE

View File

@ -38,6 +38,7 @@
#include "supervisor/shared/translate/translate.h"
// Instance of GeneratorExit exception - needed by generator.close()
// CIRCUITPY: https://github.com/adafruit/circuitpython/pull/7069 fix
#if MICROPY_CONST_GENERATOREXIT_OBJ
const
mp_obj_exception_t mp_static_GeneratorExit_obj = {{&mp_type_GeneratorExit}, (mp_obj_tuple_t *)&mp_const_empty_tuple_obj, (mp_obj_traceback_t *)&mp_const_empty_traceback_obj};
@ -49,12 +50,6 @@ mp_obj_exception_t mp_static_GeneratorExit_obj;
/******************************************************************************/
/* generator wrapper */
typedef struct _mp_obj_gen_wrap_t {
mp_obj_base_t base;
mp_obj_t *fun;
bool coroutine_generator;
} mp_obj_gen_wrap_t;
typedef struct _mp_obj_gen_instance_t {
mp_obj_base_t base;
// mp_const_none: Not-running, no exception.
@ -89,11 +84,13 @@ const mp_obj_type_t mp_type_gen_wrap = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF,
.name = MP_QSTR_generator,
.call = gen_wrap_call,
.unary_op = mp_generic_unary_op,
#if MICROPY_PY_FUNCTION_ATTRS
.attr = mp_obj_fun_bc_attr,
#endif
MP_TYPE_EXTENDED_FIELDS(
.call = gen_wrap_call,
.unary_op = mp_generic_unary_op,
),
};
/******************************************************************************/
@ -150,53 +147,9 @@ STATIC mp_obj_t native_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_k
return MP_OBJ_FROM_PTR(o);
}
#endif // MICROPY_EMIT_NATIVE
STATIC mp_obj_t bc_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
assert(self_fun->base.type == &mp_type_fun_bc);
// bytecode prelude: get state size and exception stack size
const uint8_t *ip = self_fun->bytecode;
MP_BC_PRELUDE_SIG_DECODE(ip);
// allocate the generator object, with room for local stack and exception stack
mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
o->base.type = &mp_type_gen_instance;
o->coroutine_generator = self->coroutine_generator;
o->pend_exc = mp_const_none;
o->code_state.fun_bc = self_fun;
o->code_state.ip = 0;
o->code_state.n_state = n_state;
mp_setup_code_state(&o->code_state, n_args, n_kw, args);
return MP_OBJ_FROM_PTR(o);
}
STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
#if MICROPY_EMIT_NATIVE
mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
if (self_fun->base.type == &mp_type_fun_native) {
return native_gen_wrap_call(self, n_args, n_kw, args);
}
#endif
return bc_gen_wrap_call(self_in, n_args, n_kw, args);
}
#if MICROPY_PY_FUNCTION_ATTRS
static void gen_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
mp_obj_fun_bc_attr(MP_OBJ_FROM_PTR(self_fun), attr, dest);
}
#endif
const mp_obj_type_t mp_type_gen_wrap = {
const mp_obj_type_t mp_type_native_gen_wrap = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
.flags = MP_TYPE_FLAG_BINDS_SELF,
.name = MP_QSTR_generator,
#if MICROPY_PY_FUNCTION_ATTRS
.attr = gen_attr,
@ -207,14 +160,7 @@ const mp_obj_type_t mp_type_gen_wrap = {
),
};
mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun, bool is_coroutine) {
mp_obj_gen_wrap_t *o = m_new_obj(mp_obj_gen_wrap_t);
o->base.type = &mp_type_gen_wrap;
o->fun = MP_OBJ_TO_PTR(fun);
o->coroutine_generator = is_coroutine;
return MP_OBJ_FROM_PTR(o);
}
#endif // MICROPY_EMIT_NATIVE
/******************************************************************************/
/* generator instance */
@ -363,7 +309,6 @@ STATIC mp_obj_t gen_resume_and_raise(mp_obj_t self_in, mp_obj_t send_value, mp_o
STATIC mp_obj_t gen_instance_iternext(mp_obj_t self_in) {
#if MICROPY_PY_ASYNC_AWAIT
// This translate is literally too much for m0 boards
mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
if (self->coroutine_generator) {
mp_raise_TypeError(MP_ERROR_TEXT("'coroutine' object is not an iterator"));

View File

@ -108,7 +108,7 @@ STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
#if MICROPY_CAN_OVERRIDE_BUILTINS
if (dict == &mp_module_builtins_globals) {
if (MP_STATE_VM(mp_module_builtins_override_dict) == NULL) {
MP_STATE_VM(mp_module_builtins_override_dict) = gc_make_long_lived(MP_OBJ_TO_PTR(mp_obj_new_dict(1)));
MP_STATE_VM(mp_module_builtins_override_dict) = MP_OBJ_TO_PTR(mp_obj_new_dict(1));
}
dict = MP_STATE_VM(mp_module_builtins_override_dict);
} else

View File

@ -30,7 +30,6 @@
#include <string.h>
#include <assert.h>
#include "py/gc_long_lived.h"
#include "py/objtype.h"
#include "py/runtime.h"
@ -1202,7 +1201,7 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict)
#endif
}
mp_obj_full_type_t *o = m_new0_ll(mp_obj_full_type_t, 1);
mp_obj_full_type_t *o = m_new0(mp_obj_full_type_t, 1);
o->base.type = &mp_type_type;
o->flags = base_flags;
o->name = name;
@ -1235,7 +1234,7 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict)
}
}
o->locals_dict = make_dict_long_lived(MP_OBJ_TO_PTR(locals_dict), 10);
o->locals_dict = MP_OBJ_TO_PTR(locals_dict);
#if ENABLE_SPECIAL_ACCESSORS
// Check if the class has any special accessor methods

View File

@ -72,7 +72,6 @@ PY_CORE_O_BASENAME = $(addprefix py/,\
nlrsetjmp.o \
malloc.o \
gc.o \
gc_long_lived.o \
pystack.o \
qstr.o \
vstr.o \

View File

@ -156,7 +156,7 @@ STATIC qstr qstr_add(mp_uint_t hash, mp_uint_t len, const char *q_ptr) {
#endif
mp_uint_t pool_size = sizeof(qstr_pool_t)
+ (sizeof(const char *) + sizeof(qstr_hash_t) + sizeof(qstr_len_t)) * new_alloc;
qstr_pool_t *pool = (qstr_pool_t *)m_malloc_maybe(pool_size, true);
qstr_pool_t *pool = (qstr_pool_t *)m_malloc_maybe(pool_size);
if (pool == NULL) {
// Keep qstr_last_chunk consistent with qstr_pool_t: qstr_last_chunk is not scanned
// at garbage collection since it's reachable from a qstr_pool_t. And the caller of
@ -244,10 +244,10 @@ qstr qstr_from_strn(const char *str, size_t len) {
if (al < MICROPY_ALLOC_QSTR_CHUNK_INIT) {
al = MICROPY_ALLOC_QSTR_CHUNK_INIT;
}
MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(char, al);
MP_STATE_VM(qstr_last_chunk) = m_new_maybe(char, al);
if (MP_STATE_VM(qstr_last_chunk) == NULL) {
// failed to allocate a large chunk so try with exact size
MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(char, n_bytes);
MP_STATE_VM(qstr_last_chunk) = m_new_maybe(char, n_bytes);
if (MP_STATE_VM(qstr_last_chunk) == NULL) {
QSTR_EXIT();
m_malloc_fail(n_bytes);

View File

@ -37,8 +37,8 @@ bool ringbuf_init(ringbuf_t *r, uint8_t *buf, size_t size) {
}
// Dynamic initialization. This should be accessible from a root pointer.
bool ringbuf_alloc(ringbuf_t *r, size_t size, bool long_lived) {
bool result = ringbuf_init(r, gc_alloc(size, false, long_lived), size);
bool ringbuf_alloc(ringbuf_t *r, size_t size) {
bool result = ringbuf_init(r, gc_alloc(size, false), size);
return result;
}

View File

@ -43,7 +43,7 @@ typedef struct _ringbuf_t {
bool ringbuf_init(ringbuf_t *r, uint8_t *buf, size_t capacity);
// For allocation of a buffer on the heap, use ringbuf_alloc().
bool ringbuf_alloc(ringbuf_t *r, size_t capacity, bool long_lived);
bool ringbuf_alloc(ringbuf_t *r, size_t capacity);
// Mark ringbuf as no longer in use, and allow any heap storage to be freed by gc.
void ringbuf_deinit(ringbuf_t *r);

View File

@ -111,7 +111,6 @@ STATIC mp_obj_t pewpew_make_new(const mp_obj_type_t *type, size_t n_args, size_t
pew_obj_t *pew = MP_STATE_VM(pew_singleton);
if (!pew) {
pew = mp_obj_malloc(pew_obj_t, &pewpew_type);
pew = gc_make_long_lived(pew);
MP_STATE_VM(pew_singleton) = pew;
}

View File

@ -118,12 +118,9 @@ STATIC mp_obj_t audiopwmio_pwmaudioout_make_new(const mp_obj_type_t *type, size_
validate_obj_is_free_pin_or_none(args[ARG_right_channel].u_obj, MP_QSTR_right_channel);
// create AudioOut object from the given pin
// The object is made long-lived because many implementations keep
// a pointer to the object (e.g., for the interrupt handler), which
// will not work properly if the object is moved. It is created
// with a finaliser as some ports use these (rather than 'reset' functions)
// The object is created with a finaliser as some ports use these (rather than 'reset' functions)
// to ensure resources are collected at interpreter shutdown.
audiopwmio_pwmaudioout_obj_t *self = m_new_ll_obj_with_finaliser(audiopwmio_pwmaudioout_obj_t);
audiopwmio_pwmaudioout_obj_t *self = m_new_obj_with_finaliser(audiopwmio_pwmaudioout_obj_t);
self->base.type = &audiopwmio_pwmaudioout_type;
common_hal_audiopwmio_pwmaudioout_construct(self, left_channel_pin, right_channel_pin, args[ARG_quiescent_value].u_int);

View File

@ -155,11 +155,7 @@ STATIC mp_obj_t busio_uart_make_new(const mp_obj_type_t *type, size_t n_args, si
const bool rs485_invert = args[ARG_rs485_invert].u_bool;
// Always initially allocate the UART object within the long-lived heap.
// This is needed to avoid crashes with certain UART implementations which
// cannot accommodate being moved after creation. (See
// https://github.com/adafruit/circuitpython/issues/1056)
busio_uart_obj_t *self = m_new_ll_obj_with_finaliser(busio_uart_obj_t);
busio_uart_obj_t *self = m_new_obj_with_finaliser(busio_uart_obj_t);
self->base.type = &busio_uart_type;
common_hal_busio_uart_construct(self, tx, rx, rts, cts, rs485_dir, rs485_invert,

View File

@ -56,8 +56,7 @@ STATIC mp_obj_t countio_counter_make_new(const mp_obj_type_t *type, size_t n_arg
const mcu_pin_obj_t *pin = validate_obj_is_free_pin(args[ARG_pin].u_obj, MP_QSTR_pin);
const countio_edge_t edge = validate_edge(args[ARG_edge].u_obj, MP_QSTR_edge);
const digitalio_pull_t pull = validate_pull(args[ARG_pull].u_obj, MP_QSTR_pull);
// Make long-lived because some implementations use a pointer to the object as interrupt-handler data.
countio_counter_obj_t *self = m_new_ll_obj_with_finaliser(countio_counter_obj_t);
countio_counter_obj_t *self = m_new_obj_with_finaliser(countio_counter_obj_t);
self->base.type = &countio_counter_type;
common_hal_countio_counter_construct(self, pin, edge, pull);

View File

@ -88,8 +88,7 @@ STATIC mp_obj_t pulseio_pulsein_make_new(const mp_obj_type_t *type, size_t n_arg
mp_arg_parse_all_kw_array(n_args, n_kw, all_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
const mcu_pin_obj_t *pin = validate_obj_is_free_pin(args[ARG_pin].u_obj, MP_QSTR_pin);
// Make object long-lived to avoid moving between imports
pulseio_pulsein_obj_t *self = m_new_ll_obj_with_finaliser(pulseio_pulsein_obj_t);
pulseio_pulsein_obj_t *self = m_new_obj_with_finaliser(pulseio_pulsein_obj_t);
self->base.type = &pulseio_pulsein_type;
common_hal_pulseio_pulsein_construct(self, pin, args[ARG_maxlen].u_int,

View File

@ -75,8 +75,7 @@ STATIC mp_obj_t rotaryio_incrementalencoder_make_new(const mp_obj_type_t *type,
const mcu_pin_obj_t *pin_a = validate_obj_is_free_pin(args[ARG_pin_a].u_obj, MP_QSTR_pin_a);
const mcu_pin_obj_t *pin_b = validate_obj_is_free_pin(args[ARG_pin_b].u_obj, MP_QSTR_pin_b);
// Make long-lived because some implementations use a pointer to the object as interrupt-handler data.
rotaryio_incrementalencoder_obj_t *self = m_new_ll_obj_with_finaliser(rotaryio_incrementalencoder_obj_t);
rotaryio_incrementalencoder_obj_t *self = m_new_obj_with_finaliser(rotaryio_incrementalencoder_obj_t);
self->base.type = &rotaryio_incrementalencoder_type;
common_hal_rotaryio_incrementalencoder_construct(self, pin_a, pin_b);

View File

@ -198,7 +198,7 @@ void common_hal_usb_hid_device_construct(usb_hid_device_obj_t *self, mp_obj_t re
// Copy the raw descriptor bytes into a heap obj. We don't keep the Python descriptor object.
uint8_t *descriptor_bytes = gc_alloc(bufinfo.len, false, false);
uint8_t *descriptor_bytes = gc_alloc(bufinfo.len, false);
memcpy(descriptor_bytes, bufinfo.buf, bufinfo.len);
self->report_descriptor = descriptor_bytes;
@ -255,12 +255,12 @@ void usb_hid_device_create_report_buffers(usb_hid_device_obj_t *self) {
// which is an unusual case. Normally we can just pass the data directly with tud_hid_report().
self->in_report_buffers[i] =
self->in_report_lengths[i] > 0
? gc_alloc(self->in_report_lengths[i], false, true /*long-lived*/)
? gc_alloc(self->in_report_lengths[i], false)
: NULL;
self->out_report_buffers[i] =
self->out_report_lengths[i] > 0
? gc_alloc(self->out_report_lengths[i], false, true /*long-lived*/)
? gc_alloc(self->out_report_lengths[i], false)
: NULL;
}
memset(self->out_report_buffers_updated, 0, sizeof(self->out_report_buffers_updated));

View File

@ -33,7 +33,6 @@
#include "py/runtime.h"
#include "py/repl.h"
#include "py/gc.h"
#include "py/gc_long_lived.h"
#include "py/frozenmod.h"
#include "py/mphal.h"
#if MICROPY_HW_ENABLE_USB
@ -122,6 +121,12 @@ STATIC int parse_compile_execute(const void *source, mp_parse_input_kind_t input
#endif
}
// If the code was loaded from a file, collect any garbage before running.
if (input_kind == MP_PARSE_FILE_INPUT) {
gc_collect();
}
}
// execute code
mp_hal_set_interrupt_char(CHAR_CTRL_C); // allow ctrl-C to interrupt us
#if MICROPY_REPL_INFO

View File

@ -212,7 +212,7 @@ static supervisor_allocation_node *allocate_memory_node(uint32_t length, bool hi
if (!node) {
// 4. GC allocation?
if (movable && gc_alloc_possible()) {
node = m_malloc_maybe(sizeof(supervisor_allocation_node) + length, true);
node = m_malloc_maybe(sizeof(supervisor_allocation_node) + length);
if (node) {
node->next = MP_STATE_VM(first_embedded_allocation);
MP_STATE_VM(first_embedded_allocation) = node;

View File

@ -288,10 +288,6 @@ def do_all_the_things(
dynamic_type = 0x40000000 # placeholder, doesn't match any memory
long_lived_start = load_pointer(
mp_state_ctx + 272
) # (gdb) p &mp_state_ctx.mem.gc_lowest_long_lived_ptr
type_colors = {
dict_type: "red",
property_type: "yellow",
@ -368,9 +364,6 @@ def do_all_the_things(
potential_type = None
node = ownership_graph.get_node(address)
node.attr["height"] = 0.25 * current_allocation
if address >= long_lived_start:
node.attr["fontcolor"] = "hotpink"
else:
node.attr["fontcolor"] = "black"
block_data[address] = data
for k in range(len(data) // 4):
@ -666,9 +659,6 @@ def do_all_the_things(
block, 18 * (len(wrapped) - 1), "<br/>".join(wrapped)
)
node.attr["fontname"] = "FiraCode-Bold"
if block >= long_lived_start:
node.attr["fontcolor"] = "hotpink"
else:
node.attr["fontcolor"] = "black"
node.attr["fontpath"] = "/Users/tannewt/Library/Fonts/"
node.attr["fontsize"] = 8