circuitpython/py/objmodule.c

/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 Paul Sokolovsky
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "py/gc.h"
#include "py/objmodule.h"
#include "py/runtime.h"
#include "py/builtin.h"

#include "genhdr/moduledefs.h"

#if MICROPY_MODULE_BUILTIN_INIT
STATIC void mp_module_call_init(mp_obj_t module_name, mp_obj_t module_obj);
#endif
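
// Prints a module as <module 'name'>, or as <module 'name' from 'file'>
// when MICROPY_PY___FILE__ is enabled and the module has a __file__ entry.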
STATIC void module_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
(void)kind;
mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
const char *module_name = "";
mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_MAP_LOOKUP);
if (elem != NULL) {
module_name = mp_obj_str_get_str(elem->value);
}

#if MICROPY_PY___FILE__
// If we store __file__ in imported modules then try to look up this
// symbol to give more information about the module.
elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___file__), MP_MAP_LOOKUP);
if (elem != NULL) {
mp_printf(print, "<module '%s' from '%s'>", module_name, mp_obj_str_get_str(elem->value));
return;
}
#endif

mp_printf(print, "<module '%s'>", module_name);
}
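
// Implements the attr protocol for modules. On entry dest[0] == MP_OBJ_NULL
// means a load of `attr`; otherwise it is a delete/store, with dest[1] ==
// MP_OBJ_NULL meaning delete and anything else the value to store. Setting
// dest[0] = MP_OBJ_NULL before returning signals success. With
// MICROPY_MODULE_GETATTR enabled, a failed load falls back to a module-level
// __getattr__ (PEP 562). A sketch of how the runtime reaches this for
// `mod.foo = x` (the attribute name here is illustrative):
//   mp_obj_t dest[2] = { MP_OBJ_SENTINEL, x };
//   module_attr(mod, MP_QSTR_foo, dest);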
STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
if (dest[0] == MP_OBJ_NULL) {
// load attribute
mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
if (elem != NULL) {
dest[0] = elem->value;
#if MICROPY_MODULE_GETATTR
} else if (attr != MP_QSTR___getattr__) {
elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___getattr__), MP_MAP_LOOKUP);
if (elem != NULL) {
dest[0] = mp_call_function_1(elem->value, MP_OBJ_NEW_QSTR(attr));
}
#endif
}
} else {
// delete/store attribute
mp_obj_dict_t *dict = self->globals;
if (dict->map.is_fixed) {
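// Builtin module dicts are fixed (ROM) maps and normally can't be mutated.
// With MICROPY_CAN_OVERRIDE_BUILTINS enabled, stores to the builtins module
// are diverted into a RAM override dict instead.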
mp_map_elem_t *elem = mp_map_lookup(&dict->map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
#if MICROPY_CAN_OVERRIDE_BUILTINS
if (dict == &mp_module_builtins_globals) {
if (MP_STATE_VM(mp_module_builtins_override_dict) == NULL) {
MP_STATE_VM(mp_module_builtins_override_dict) = gc_make_long_lived(MP_OBJ_TO_PTR(mp_obj_new_dict(1)));
}
dict = MP_STATE_VM(mp_module_builtins_override_dict);
} else
#endif
// Return success if the given value is already in the dictionary. This is the case for
// native packages with native submodules.
if (elem != NULL && elem->value == dest[1]) {
dest[0] = MP_OBJ_NULL; // indicate success
return;
} else {
// can't delete or store to fixed map
return;
}
}
if (dest[1] == MP_OBJ_NULL) {
// delete attribute
mp_obj_dict_delete(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr));
} else {
// store attribute
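// CircuitPython copies stored values into the long-lived area at the top
// of the heap so module members pack densely and fragment the heap less;
// the old short-lived copy is reclaimed at the next collect.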
mp_obj_t long_lived = MP_OBJ_FROM_PTR(gc_make_long_lived(MP_OBJ_TO_PTR(dest[1])));
// TODO CPython allows STORE_ATTR to a module, but is this the correct implementation?
mp_obj_dict_store(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr), long_lived);
}
dest[0] = MP_OBJ_NULL; // indicate success
}
}

const mp_obj_type_t mp_type_module = {
{ &mp_type_type },
.name = MP_QSTR_module,
.print = module_print,
.attr = module_attr,
};
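
// Returns the already-loaded module of the given name if there is one,
// otherwise creates the module object and its globals dict in the long-lived
// heap area, stores __name__, and records the module in the loaded-modules
// dict (exposed to Python as sys.modules).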
mp_obj_t mp_obj_new_module(qstr module_name) {
mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
mp_map_elem_t *el = mp_map_lookup(mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
// We could error out if module already exists, but let C extensions
// add new members to existing modules.
if (el->value != MP_OBJ_NULL) {
return el->value;
}

// create new module object
mp_obj_module_t *o = m_new_ll_obj(mp_obj_module_t);
o->base.type = &mp_type_module;
o->globals = gc_make_long_lived(MP_OBJ_TO_PTR(mp_obj_new_dict(MICROPY_MODULE_DICT_SIZE)));
// store __name__ entry in the module
mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(module_name));
// store the new module into the slot in the global dict holding all modules
el->value = MP_OBJ_FROM_PTR(o);
// return the new module
return MP_OBJ_FROM_PTR(o);
}
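
// Direct accessors for a module's globals dict.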
mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in) {
assert(mp_obj_is_type(self_in, &mp_type_module));
mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
return self->globals;
}

void mp_obj_module_set_globals(mp_obj_t self_in, mp_obj_dict_t *globals) {
assert(mp_obj_is_type(self_in, &mp_type_module));
mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
self->globals = globals;
}

/******************************************************************************/
// Global module table and related functions

STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = {
{ MP_ROM_QSTR(MP_QSTR___main__), MP_ROM_PTR(&mp_module___main__) },
{ MP_ROM_QSTR(MP_QSTR_builtins), MP_ROM_PTR(&mp_module_builtins) },
{ MP_ROM_QSTR(MP_QSTR_micropython), MP_ROM_PTR(&mp_module_micropython) },
#if MICROPY_PY_IO
#if CIRCUITPY
{ MP_ROM_QSTR(MP_QSTR_io), MP_ROM_PTR(&mp_module_io) },
#else
{ MP_ROM_QSTR(MP_QSTR_uio), MP_ROM_PTR(&mp_module_io) },
#endif
#endif
#if MICROPY_PY_COLLECTIONS
{ MP_ROM_QSTR(MP_QSTR_collections), MP_ROM_PTR(&mp_module_collections) },
#endif
// CircuitPython: Now in shared-bindings/, so not defined here.
#if MICROPY_PY_STRUCT
{ MP_ROM_QSTR(MP_QSTR_ustruct), MP_ROM_PTR(&mp_module_ustruct) },
#endif
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_MATH
{ MP_ROM_QSTR(MP_QSTR_math), MP_ROM_PTR(&mp_module_math) },
#endif
#if MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
{ MP_ROM_QSTR(MP_QSTR_cmath), MP_ROM_PTR(&mp_module_cmath) },
#endif
#endif
#if MICROPY_PY_SYS
{ MP_ROM_QSTR(MP_QSTR_sys), MP_ROM_PTR(&mp_module_sys) },
#endif
#if MICROPY_PY_GC && MICROPY_ENABLE_GC
{ MP_ROM_QSTR(MP_QSTR_gc), MP_ROM_PTR(&mp_module_gc) },
#endif
#if MICROPY_PY_THREAD
{ MP_ROM_QSTR(MP_QSTR__thread), MP_ROM_PTR(&mp_module_thread) },
#endif

// extmod modules
#if MICROPY_PY_UERRNO
#if CIRCUITPY
// CircuitPython: Defined in MICROPY_PORT_BUILTIN_MODULES, so not defined here.
// TODO: move to shared-bindings/
#else
{ MP_ROM_QSTR(MP_QSTR_uerrno), MP_ROM_PTR(&mp_module_uerrno) },
#endif
#endif
#if MICROPY_PY_UCTYPES
{ MP_ROM_QSTR(MP_QSTR_uctypes), MP_ROM_PTR(&mp_module_uctypes) },
#endif
#if MICROPY_PY_UZLIB
{ MP_ROM_QSTR(MP_QSTR_uzlib), MP_ROM_PTR(&mp_module_uzlib) },
#endif
#if MICROPY_PY_UJSON
#if CIRCUITPY
// CircuitPython: Defined in MICROPY_PORT_BUILTIN_MODULES, so not defined here.
// TODO: move to shared-bindings/
#else
{ MP_ROM_QSTR(MP_QSTR_ujson), MP_ROM_PTR(&mp_module_ujson) },
#endif
#endif
#if CIRCUITPY_ULAB
#if CIRCUITPY
// CircuitPython: Defined in MICROPY_PORT_BUILTIN_MODULES, so not defined here.
// TODO: move to shared-bindings/
#else
{ MP_ROM_QSTR(MP_QSTR_ulab), MP_ROM_PTR(&ulab_user_cmodule) },
#endif
#endif
#if MICROPY_PY_URE
#if CIRCUITPY
// CircuitPython: Defined in MICROPY_PORT_BUILTIN_MODULES, so not defined here.
// TODO: move to shared-bindings/
#else
{ MP_ROM_QSTR(MP_QSTR_ure), MP_ROM_PTR(&mp_module_ure) },
#endif
#endif
#if MICROPY_PY_UHEAPQ
{ MP_ROM_QSTR(MP_QSTR_uheapq), MP_ROM_PTR(&mp_module_uheapq) },
#endif
#if MICROPY_PY_UTIMEQ
{ MP_ROM_QSTR(MP_QSTR_utimeq), MP_ROM_PTR(&mp_module_utimeq) },
#endif
#if MICROPY_PY_UHASHLIB
{ MP_ROM_QSTR(MP_QSTR_hashlib), MP_ROM_PTR(&mp_module_uhashlib) },
#endif
#if MICROPY_PY_UBINASCII
#if CIRCUITPY
// CircuitPython: Defined in MICROPY_PORT_BUILTIN_MODULES, so not defined here.
// TODO: move to shared-bindings/
#else
{ MP_ROM_QSTR(MP_QSTR_ubinascii), MP_ROM_PTR(&mp_module_ubinascii) },
#endif
#endif
#if MICROPY_PY_URANDOM
{ MP_ROM_QSTR(MP_QSTR_urandom), MP_ROM_PTR(&mp_module_urandom) },
#endif
#if MICROPY_PY_USELECT
{ MP_ROM_QSTR(MP_QSTR_uselect), MP_ROM_PTR(&mp_module_uselect) },
#endif
#if MICROPY_PY_FRAMEBUF
{ MP_ROM_QSTR(MP_QSTR_framebuf), MP_ROM_PTR(&mp_module_framebuf) },
#endif
#if MICROPY_PY_BTREE
{ MP_ROM_QSTR(MP_QSTR_btree), MP_ROM_PTR(&mp_module_btree) },
#endif

// extra builtin modules as defined by a port
MICROPY_PORT_BUILTIN_MODULES

#ifdef MICROPY_REGISTERED_MODULES
// builtin modules declared with MP_REGISTER_MODULE()
MICROPY_REGISTERED_MODULES
#endif

#if defined(MICROPY_DEBUG_MODULES) && defined(MICROPY_PORT_BUILTIN_DEBUG_MODULES)
, MICROPY_PORT_BUILTIN_DEBUG_MODULES
#endif
};

MP_DEFINE_CONST_MAP(mp_builtin_module_map, mp_builtin_module_table);

// Tries to find a loaded module, otherwise attempts to load a builtin, otherwise returns MP_OBJ_NULL.
mp_obj_t mp_module_get_loaded_or_builtin(qstr module_name) {
// First try loaded modules.
mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_dict).map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
if (!elem) {
#if MICROPY_MODULE_WEAK_LINKS
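// With weak links enabled, builtin lookup (and builtin init) is centralized
// in mp_module_get_builtin().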
return mp_module_get_builtin(module_name);
#else
// Otherwise try builtin.
elem = mp_map_lookup((mp_map_t *)&mp_builtin_module_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
if (!elem) {
return MP_OBJ_NULL;
}
#if MICROPY_MODULE_BUILTIN_INIT
// If found, it's a newly loaded built-in, so init it.
mp_module_call_init(MP_OBJ_NEW_QSTR(module_name), elem->value);
#endif
#endif
}
return elem->value;
}

#if MICROPY_MODULE_WEAK_LINKS
// Attempts to load a builtin module, otherwise returns MP_OBJ_NULL.
mp_obj_t mp_module_get_builtin(qstr module_name) {
// Try builtin.
mp_map_elem_t *elem = mp_map_lookup((mp_map_t *)&mp_builtin_module_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
if (!elem) {
return MP_OBJ_NULL;
}
#if MICROPY_MODULE_BUILTIN_INIT
// If found, it's a newly loaded built-in, so init it.
mp_module_call_init(MP_OBJ_NEW_QSTR(module_name), elem->value);
#endif
return elem->value;
}
#endif

#if MICROPY_MODULE_BUILTIN_INIT
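// Records a module in the loaded-modules dict so that a subsequent import
// finds it and __init__ is not called again.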
STATIC void mp_module_register(mp_obj_t module_name, mp_obj_t module) {
mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
mp_map_lookup(mp_loaded_modules_map, module_name, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
}

STATIC void mp_module_call_init(mp_obj_t module_name, mp_obj_t module_obj) {
// Look for __init__ and call it if it exists
mp_obj_t dest[2];
mp_load_method_maybe(module_obj, MP_QSTR___init__, dest);
if (dest[0] != MP_OBJ_NULL) {
mp_call_method_n_kw(0, 0, dest);
// Register module so __init__ is not called again.
// If a module can be referenced by more than one name (eg due to weak links)
// then __init__ will still be called for each distinct import, and it's then
// up to the particular module to make sure its __init__ code only runs once.
mp_module_register(module_name, module_obj);
}
}
#endif