circuitpython/py/malloc.c


/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "py/mpconfig.h"
#include "py/misc.h"
#include "py/mpstate.h"
#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_printf DEBUG_printf
#else // don't print debugging info
#define DEBUG_printf(...) (void)0
#endif
#if MICROPY_MEM_STATS
#if !MICROPY_MALLOC_USES_ALLOCATED_SIZE
#error MICROPY_MEM_STATS requires MICROPY_MALLOC_USES_ALLOCATED_SIZE
#endif
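// Record a new high-water mark in peak_bytes_allocated whenever
// current_bytes_allocated exceeds it.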
#define UPDATE_PEAK() { if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); }
#endif
#if MICROPY_ENABLE_GC
#include "py/gc.h"
// We redirect standard alloc functions to GC heap - just for the rest of
// this module. In the rest of MicroPython source, system malloc can be
// freely accessed - for interfacing with system and 3rd-party libs for
// example. On the other hand, some (e.g. bare-metal) ports may use GC
// heap as system heap, so, to avoid warnings, we do undef's first.
#undef malloc
#undef free
#undef realloc
#define malloc_ll(b, ll) gc_alloc((b), false, (ll))
#define malloc_with_finaliser(b) gc_alloc((b), true, false)
#define free gc_free
#define realloc(ptr, n) gc_realloc(ptr, n, true)
#define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv)
#else
#define malloc_ll(b, ll) malloc(b)
#define malloc_with_finaliser(b) malloc((b))
STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
    if (allow_move) {
        return realloc(ptr, n_bytes);
    } else {
        // We are asked to resize, but without moving the memory region pointed to
        // by ptr. Unless the underlying memory manager has special provision for
        // this behaviour there is nothing we can do except fail to resize.
        return NULL;
    }
}
#endif // MICROPY_ENABLE_GC
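
// Callers normally reach these functions through the m_new/m_renew/m_del
// macros in py/misc.h, which multiply by sizeof(type). An illustrative
// sketch of that usage (see py/misc.h for the exact macro definitions):
//
//     mp_obj_t *items = m_new(mp_obj_t, 4);   // -> m_malloc()
//     items = m_renew(mp_obj_t, items, 4, 8); // -> m_realloc()
//     m_del(mp_obj_t, items, 8);              // -> m_free()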
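
// Allocate num_bytes, calling m_malloc_fail() (which does not return) if the
// allocation fails. With MICROPY_ENABLE_GC the block comes from the GC heap
// and long_lived selects the long-lived end of the heap; the system-malloc
// fallback ignores the flag.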
void *m_malloc(size_t num_bytes, bool long_lived) {
    void *ptr = malloc_ll(num_bytes, long_lived);
    if (ptr == NULL && num_bytes != 0) {
        m_malloc_fail(num_bytes);
    }
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
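
// As m_malloc(), but return NULL on failure so the caller can handle
// out-of-memory itself rather than going through m_malloc_fail().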
void *m_malloc_maybe(size_t num_bytes, bool long_lived) {
    void *ptr = malloc_ll(num_bytes, long_lived);
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
#if MICROPY_ENABLE_FINALISER
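// Allocate a block with a finaliser record so the GC can run its finaliser
// (e.g. a Python __del__ method) when the block is collected. These blocks
// are always allocated short-lived (see malloc_with_finaliser above).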
void *m_malloc_with_finaliser(size_t num_bytes) {
    void *ptr = malloc_with_finaliser(num_bytes);
    if (ptr == NULL && num_bytes != 0) {
        m_malloc_fail(num_bytes);
    }
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
#endif
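
// Allocate num_bytes and return the block zeroed. The explicit memset() is
// compiled out when MICROPY_GC_CONSERVATIVE_CLEAR is set, because then the
// GC already clears the memory it hands out.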
void *m_malloc0(size_t num_bytes, bool long_lived) {
    void *ptr = m_malloc(num_bytes, long_lived);
    // If this config is set then the GC clears all memory, so we don't need to.
    #if !MICROPY_GC_CONSERVATIVE_CLEAR
    memset(ptr, 0, num_bytes);
    #endif
    return ptr;
}
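
// Resize a block, calling m_malloc_fail() if the reallocation fails. When
// MICROPY_MALLOC_USES_ALLOCATED_SIZE is enabled the caller must also pass
// the block's previous size, which is used to keep the statistics exact.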
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
#else
void *m_realloc(void *ptr, size_t new_num_bytes) {
#endif
    void *new_ptr = realloc(ptr, new_num_bytes);
    if (new_ptr == NULL && new_num_bytes != 0) {
        m_malloc_fail(new_num_bytes);
    }
    #if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    size_t diff = new_num_bytes - old_num_bytes;
    MP_STATE_MEM(total_bytes_allocated) += diff;
    MP_STATE_MEM(current_bytes_allocated) += diff;
    UPDATE_PEAK();
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    #else
    DEBUG_printf("realloc %p, %d : %p\n", ptr, new_num_bytes, new_ptr);
    #endif
    return new_ptr;
}
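
// As m_realloc(), but return NULL on failure, and honour allow_move: when it
// is false the block may only be resized in place, never relocated.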
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move) {
#else
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
#endif
    void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
    #if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    // Also, don't count failed reallocs.
    if (!(new_ptr == NULL && new_num_bytes != 0)) {
        size_t diff = new_num_bytes - old_num_bytes;
        MP_STATE_MEM(total_bytes_allocated) += diff;
        MP_STATE_MEM(current_bytes_allocated) += diff;
        UPDATE_PEAK();
    }
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    #else
DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, new_num_bytes, new_ptr);
    #endif
    return new_ptr;
}
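
// Free a block. With MICROPY_MALLOC_USES_ALLOCATED_SIZE the caller supplies
// the block's size so that current_bytes_allocated can be decremented.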
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void m_free(void *ptr, size_t num_bytes) {
#else
void m_free(void *ptr) {
#endif
    free(ptr);
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("free %p, %d\n", ptr, num_bytes);
    #else
    DEBUG_printf("free %p\n", ptr);
    #endif
}
#if MICROPY_MEM_STATS
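// Accessors for the allocation statistics maintained above.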
size_t m_get_total_bytes_allocated(void) {
    return MP_STATE_MEM(total_bytes_allocated);
}

size_t m_get_current_bytes_allocated(void) {
    return MP_STATE_MEM(current_bytes_allocated);
}

size_t m_get_peak_bytes_allocated(void) {
    return MP_STATE_MEM(peak_bytes_allocated);
}
#endif