circuitpython/py/gc_long_lived.c


Introduce a long lived section of the heap.

This adapts the allocation process to start from either end of the heap when searching for free space. The default behavior is identical to the existing behavior: it starts with the lowest block and looks higher. Now it can also look from the highest block downward, depending on the long_lived parameter to gc_alloc. As the heap fills, the two sections may overlap; when they do, a collect may be triggered to keep the long lived section compact. However, free space is always eligible for either type of allocation.

By starting from either end of the heap, we can separate short lived objects from long lived ones. This separation reduces heap fragmentation because long lived objects are easy to pack densely. Most objects start out short lived but may be made long lived once they are referenced by a type or module. This involves copying the memory and then letting the collect phase free the old portion. QSTR pools and chunks are always long lived because they are never freed.

The reallocation, collection and free processes are largely unchanged; they simply also maintain an index to the highest free block as well as the lowest. These indices are used to speed up the allocation search until the next collect.

In practice, this change may slightly slow down import statements, with the benefit that memory is much less fragmented afterwards. For example, in a test import into a 20k heap that leaves ~6k free, the largest contiguous free space was previously ~400 bytes; after this change it is over 3400 bytes.
2018-01-23 19:22:05 -05:00
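
To make the search concrete, here is a minimal sketch of the two-direction scan described above. It is not the actual gc.c implementation: the block_used map, NUM_BLOCKS and find_free_run are hypothetical simplifications of the real block-table walk inside gc_alloc.

#include <stdbool.h>
#include <stddef.h>

#define NUM_BLOCKS 64

static bool block_used[NUM_BLOCKS]; // true once a block is allocated

// Find n contiguous free blocks. Short lived allocations scan upward from the
// lowest block; long lived allocations scan downward from the highest.
// Returns the first (lowest) block index of the run, or -1 if no run fits.
static int find_free_run(size_t n, bool long_lived) {
    if (n == 0 || n > NUM_BLOCKS) {
        return -1;
    }
    size_t run = 0;
    if (!long_lived) {
        for (size_t i = 0; i < NUM_BLOCKS; i++) {
            run = block_used[i] ? 0 : run + 1;
            if (run == n) {
                return (int)(i + 1 - n); // run ends at block i
            }
        }
    } else {
        for (size_t i = NUM_BLOCKS; i > 0; i--) {
            run = block_used[i - 1] ? 0 : run + 1;
            if (run == n) {
                return (int)(i - 1); // run starts at block i - 1
            }
        }
    }
    return -1; // no space; the real allocator may trigger a collect here
}

The real allocator also caches the lowest and highest known free block indices between collects, so these scans rarely need to walk the fully allocated middle of the heap.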
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/emitglue.h"
#include "py/gc_long_lived.h"
#include "py/gc.h"
mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth) {
    #ifndef MICROPY_ENABLE_GC
    return fun_bc;
    #endif
    if (fun_bc == NULL || fun_bc == mp_const_none || max_depth == 0) {
        return fun_bc;
    }
    fun_bc->bytecode = gc_make_long_lived((byte*) fun_bc->bytecode);
    fun_bc->globals = make_dict_long_lived(fun_bc->globals, max_depth - 1);
    for (uint32_t i = 0; i < gc_nbytes(fun_bc->const_table) / sizeof(mp_obj_t); i++) {
        // Skip things that aren't allocated on the heap (and hence have zero bytes.)
        if (gc_nbytes((byte *)fun_bc->const_table[i]) == 0) {
            continue;
        }
        // Try to detect raw code.
        mp_raw_code_t* raw_code = MP_OBJ_TO_PTR(fun_bc->const_table[i]);
        if (raw_code->kind == MP_CODE_BYTECODE) {
            raw_code->data.u_byte.bytecode = gc_make_long_lived((byte*) raw_code->data.u_byte.bytecode);
            raw_code->data.u_byte.const_table = gc_make_long_lived((byte*) raw_code->data.u_byte.const_table);
        }
        ((mp_uint_t *) fun_bc->const_table)[i] = (mp_uint_t) make_obj_long_lived(
            (mp_obj_t) fun_bc->const_table[i], max_depth - 1);
    }
    fun_bc->const_table = gc_make_long_lived((mp_uint_t*) fun_bc->const_table);
    // extra_args stores keyword only argument default values.
    size_t words = gc_nbytes(fun_bc) / sizeof(mp_uint_t*);
    // Functions (mp_obj_fun_bc_t) have four pointers (base, globals, bytecode and const_table)
    // before the variable length extra_args so remove them from the length.
    for (size_t i = 0; i < words - 4; i++) {
        if (fun_bc->extra_args[i] == NULL) {
            continue;
        }
        if (MP_OBJ_IS_TYPE(fun_bc->extra_args[i], &mp_type_dict)) {
            fun_bc->extra_args[i] = make_dict_long_lived(fun_bc->extra_args[i], max_depth - 1);
        } else {
            fun_bc->extra_args[i] = make_obj_long_lived(fun_bc->extra_args[i], max_depth - 1);
        }
    }
    return gc_make_long_lived(fun_bc);
}
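
// Move a property and its getter/setter/deleter proxies into the long lived
// section.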
mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth) {
    #ifndef MICROPY_ENABLE_GC
    return prop;
    #endif
    if (max_depth == 0) {
        return prop;
    }
    prop->proxy[0] = make_obj_long_lived((mp_obj_fun_bc_t*) prop->proxy[0], max_depth - 1);
    prop->proxy[1] = make_obj_long_lived((mp_obj_fun_bc_t*) prop->proxy[1], max_depth - 1);
    prop->proxy[2] = make_obj_long_lived((mp_obj_fun_bc_t*) prop->proxy[2], max_depth - 1);
    return gc_make_long_lived(prop);
}
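
// Move a dict, its table and every stored value into the long lived section.
// The map's scanning flag guards against infinite recursion through cycles.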
mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth) {
    #ifndef MICROPY_ENABLE_GC
    return dict;
    #endif
    if (dict == NULL || max_depth == 0) {
        return dict;
    }
    // Don't recurse unnecessarily. Return immediately if we've already seen this dict.
    if (dict->map.scanning) {
        return dict;
    }
    // Mark that we're processing this dict.
    dict->map.scanning = 1;
    // Update all of the references first so that we reduce the chance of references to the old
    // copies.
    dict->map.table = gc_make_long_lived(dict->map.table);
    for (size_t i = 0; i < dict->map.alloc; i++) {
        if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) {
            mp_obj_t value = dict->map.table[i].value;
            dict->map.table[i].value = make_obj_long_lived(value, max_depth - 1);
        }
    }
    dict = gc_make_long_lived(dict);
    // Done recursing through this dict.
    dict->map.scanning = 0;
    return dict;
}
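
// Move a string's character data and then the string object itself.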
mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str) {
    str->data = gc_make_long_lived((byte *) str->data);
    return gc_make_long_lived(str);
}
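
// Dispatch on the object's type and move it, along with the data it
// references, into the long lived section of the heap.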
mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth) {
    #ifndef MICROPY_ENABLE_GC
    return obj;
    #endif
    if (obj == NULL) {
        return obj;
    }
    if (MP_OBJ_IS_TYPE(obj, &mp_type_fun_bc)) {
        mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(obj);
        return MP_OBJ_FROM_PTR(make_fun_bc_long_lived(fun_bc, max_depth));
    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_property)) {
        mp_obj_property_t *prop = MP_OBJ_TO_PTR(obj);
        return MP_OBJ_FROM_PTR(make_property_long_lived(prop, max_depth));
    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_str)) {
        mp_obj_str_t *str = MP_OBJ_TO_PTR(obj);
        return MP_OBJ_FROM_PTR(make_str_long_lived(str));
    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_type)) {
        // Types are already long lived during creation.
        return obj;
    } else {
        return gc_make_long_lived(obj);
    }
}