diff --git a/py/gc.c b/py/gc.c
index 826540d353..fc6bc90b67 100644
--- a/py/gc.c
+++ b/py/gc.c
@@ -138,7 +138,6 @@ void gc_init(void *start, void *end) {
     MP_STATE_MEM(gc_alloc_table_start) = (byte *)start;
 
     #if MICROPY_ENABLE_FINALISER
-    size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
     MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len) + 1;
     #endif
 
@@ -147,18 +146,16 @@ void gc_init(void *start, void *end) {
     MP_STATE_MEM(gc_pool_end) = end;
 
     #if MICROPY_ENABLE_FINALISER
+    size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
+    (void)gc_finaliser_table_byte_len; // avoid unused variable diagnostic if asserts are disabled
     assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
     #endif
 
-    // Clear ATBs plus one more byte. The extra byte might be read when we read the final ATB and
-    // then try to count its tail. Clearing the byte ensures it is 0 and ends the chain. Without an
-    // FTB, it'll just clear the pool byte early.
-    memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len) + 1);
-
-    #if MICROPY_ENABLE_FINALISER
-    // clear FTBs
-    memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
-    #endif
+    // Clear ATBs & finalisers (if enabled). This also clears the extra byte
+    // that sits between the ATBs and finalisers, which ensures every chain
+    // in the ATB terminates rather than erroneously using bits from the
+    // finalisers.
+    memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_pool_start) - MP_STATE_MEM(gc_alloc_table_start));
 
     // Set first free ATB index to the start of the heap.
     for (size_t i = 0; i < MICROPY_ATB_INDICES; i++) {
diff --git a/shared/libc/string0.c b/shared/libc/string0.c
index 86e7cc5960..92b063c552 100644
--- a/shared/libc/string0.c
+++ b/shared/libc/string0.c
@@ -28,6 +28,8 @@
 #include <stdint.h>
 #include <string.h>
 
+#include "py/mpconfig.h"
+
 #ifndef likely
 #define likely(x) __builtin_expect((x), 1)
 #endif
@@ -35,6 +37,7 @@
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wcast-align"
 void *memcpy(void *dst, const void *src, size_t n) {
+#if CIRCUITPY_FULL_BUILD
     if (likely(!(((uintptr_t)dst) & 3) && !(((uintptr_t)src) & 3))) {
         // pointers aligned
         uint32_t *d = dst;
@@ -56,7 +59,9 @@ void *memcpy(void *dst, const void *src, size_t n) {
             // copy byte
             *((uint8_t*)d) = *((const uint8_t*)s);
         }
-    } else {
+    } else
+#endif
+    {
         // unaligned access, copy bytes
         uint8_t *d = dst;
         const uint8_t *s = src;
@@ -93,6 +98,7 @@ void *memmove(void *dest, const void *src, size_t n) {
 }
 
 void *memset(void *s, int c, size_t n) {
+#if CIRCUITPY_FULL_BUILD
     if (c == 0 && ((uintptr_t)s & 3) == 0) {
         // aligned store of 0
         uint32_t *s32 = s;
@@ -106,7 +112,9 @@ void *memset(void *s, int c, size_t n) {
         if (n & 1) {
             *((uint8_t*)s32) = 0;
         }
-    } else {
+    } else
+#endif
+    {
         uint8_t *s2 = s;
         for (; n > 0; n--) {
             *s2++ = c;
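
For context on the gc.c change, here is a minimal standalone sketch (not part of the patch) of the heap-metadata arithmetic it relies on. The constants and the ATB size below are illustrative assumptions that roughly mirror the 32-bit defaults in py/gc.c (two ATB bits and one FTB bit per 16-byte block); the point is that gc_pool_start sits immediately after the finaliser table, so a single memset from gc_alloc_table_start up to gc_pool_start covers the ATB, the spare byte, and the FTB that the old code cleared in two calls.

#include <stdio.h>
#include <stddef.h>

// Illustrative defaults (typical 32-bit port): 2 ATB bits + 1 FTB bit per 16-byte block.
#define BYTES_PER_BLOCK 16
#define BLOCKS_PER_ATB  4
#define BLOCKS_PER_FTB  8

int main(void) {
    size_t alloc_table_byte_len = 64; // hypothetical ATB size, for illustration only

    // Same rounding-up division as gc_init(): one FTB bit per block.
    size_t finaliser_table_byte_len =
        (alloc_table_byte_len * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;

    // Metadata layout: [ ATB | 1 spare byte | FTB | pool ... ]
    size_t ftb_offset = alloc_table_byte_len + 1;
    size_t pool_offset = ftb_offset + finaliser_table_byte_len;

    printf("ATB: %zu bytes, spare: 1 byte, FTB: %zu bytes\n",
        alloc_table_byte_len, finaliser_table_byte_len);
    printf("one memset of %zu bytes (gc_pool_start - gc_alloc_table_start) clears all three\n",
        pool_offset);
    return 0;
}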