circuitpython/py/qstr.c


/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include "py/gc.h"
#include "py/mpstate.h"
#include "py/qstr.h"
#include "py/gc.h"
#include "supervisor/linker.h"
// NOTE: we are using linear arrays to store and search for qstrs (unique strings, interned strings)
// ultimately we will replace this with a static hash table of some kind
// also probably need to include the length in the string data, to allow null bytes in the string
#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_printf DEBUG_printf
#else // don't print debugging info
#define DEBUG_printf(...) (void)0
#endif
// A qstr is an index into the qstr pool.
// The data for a qstr contains (hash, length, data):
// - hash (configurable number of bytes)
// - length (configurable number of bytes)
// - data ("length" number of bytes)
// - \0 terminated (so they can be printed using printf)
#if MICROPY_QSTR_BYTES_IN_HASH == 1
#define Q_HASH_MASK (0xff)
#define Q_GET_HASH(q) ((mp_uint_t)(q)[0])
#define Q_SET_HASH(q, hash) do { (q)[0] = (hash); } while (0)
#elif MICROPY_QSTR_BYTES_IN_HASH == 2
#define Q_HASH_MASK (0xffff)
#define Q_GET_HASH(q) ((mp_uint_t)(q)[0] | ((mp_uint_t)(q)[1] << 8))
#define Q_SET_HASH(q, hash) do { (q)[0] = (hash); (q)[1] = (hash) >> 8; } while (0)
#else
#error unimplemented qstr hash decoding
#endif
#define Q_GET_ALLOC(q) (MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + Q_GET_LENGTH(q) + 1)
#define Q_GET_DATA(q) ((q) + MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN)
#if MICROPY_QSTR_BYTES_IN_LEN == 1
#define Q_GET_LENGTH(q) ((q)[MICROPY_QSTR_BYTES_IN_HASH])
#define Q_SET_LENGTH(q, len) do { (q)[MICROPY_QSTR_BYTES_IN_HASH] = (len); } while (0)
#elif MICROPY_QSTR_BYTES_IN_LEN == 2
#define Q_GET_LENGTH(q) ((q)[MICROPY_QSTR_BYTES_IN_HASH] | ((q)[MICROPY_QSTR_BYTES_IN_HASH + 1] << 8))
#define Q_SET_LENGTH(q, len) do { (q)[MICROPY_QSTR_BYTES_IN_HASH] = (len); (q)[MICROPY_QSTR_BYTES_IN_HASH + 1] = (len) >> 8; } while (0)
#else
#error unimplemented qstr length decoding
#endif
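// Illustrative sketch (not part of the original file; assumes the common
// configuration MICROPY_QSTR_BYTES_IN_HASH == 2, MICROPY_QSTR_BYTES_IN_LEN == 1):
// a qstr for "abc" whose hash came out as 0x1234 would occupy these 7 bytes:
//
//     { 0x34, 0x12, 0x03, 'a', 'b', 'c', '\0' }
//
// Q_GET_HASH then yields 0x1234, Q_GET_LENGTH yields 3, Q_GET_DATA points at
// 'a', and Q_GET_ALLOC yields 2 + 1 + 3 + 1 = 7.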
#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
#define QSTR_ENTER() mp_thread_mutex_lock(&MP_STATE_VM(qstr_mutex), 1)
#define QSTR_EXIT() mp_thread_mutex_unlock(&MP_STATE_VM(qstr_mutex))
#else
#define QSTR_ENTER()
#define QSTR_EXIT()
#endif
// this must match the equivalent function in makeqstrdata.py
mp_uint_t qstr_compute_hash(const byte *data, size_t len) {
// djb2 algorithm; see http://www.cse.yorku.ca/~oz/hash.html
mp_uint_t hash = 5381;
for (const byte *top = data + len; data < top; data++) {
hash = ((hash << 5) + hash) ^ (*data); // hash * 33 ^ data
}
hash &= Q_HASH_MASK;
// Make sure that a valid hash is never zero; zero means "hash not computed"
if (hash == 0) {
hash++;
}
return hash;
}
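// Worked example (illustrative; assumes MICROPY_QSTR_BYTES_IN_HASH == 2, so
// Q_HASH_MASK == 0xffff): hashing the single byte 'a' (0x61) proceeds as
//
//     hash = 5381
//     hash = ((5381 << 5) + 5381) ^ 0x61 = 177573 ^ 0x61 = 0x2b5c4
//     hash &= 0xffff   ->  0xb5c4
//
// makeqstrdata.py must produce the same value, because the hashes for the
// static qstrs below are pre-computed at build time.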
const qstr_pool_t mp_qstr_const_pool = {
NULL, // no previous pool
0, // no previous pool
10, // set so that the first dynamically allocated pool is twice this size; must be <= the len (just below)
MP_QSTRnumber_of, // corresponds to number of strings in array just below
{
#ifndef NO_QSTR
#define QDEF(id, str) str,
#define TRANSLATION(id, length, compressed...)
#include "genhdr/qstrdefs.generated.h"
#undef TRANSLATION
#undef QDEF
#endif
},
};
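// How the table above is filled in (a hedged sketch; the exact bytes are
// emitted by makeqstrdata.py at build time): a generated entry looks roughly
// like
//
//     QDEF(MP_QSTR_abc, (const byte *)"\x34\x12\x03" "abc")
//
// and, with QDEF(id, str) defined as just "str,", it expands to a bare
// pointer, so the pre-computed hash and length bytes are baked into the
// string literal in exactly the layout the Q_GET_* macros above expect.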
#ifdef MICROPY_QSTR_EXTRA_POOL
extern const qstr_pool_t MICROPY_QSTR_EXTRA_POOL;
#define CONST_POOL MICROPY_QSTR_EXTRA_POOL
#else
#define CONST_POOL mp_qstr_const_pool
#endif
void qstr_init(void) {
MP_STATE_VM(last_pool) = (qstr_pool_t*)&CONST_POOL; // we won't modify the const_pool since it has no allocated room left
MP_STATE_VM(qstr_last_chunk) = NULL;
#if MICROPY_PY_THREAD
mp_thread_mutex_init(&MP_STATE_VM(qstr_mutex));
#endif
}
STATIC const byte *find_qstr(qstr q) {
// search pool for this qstr
// total_prev_len==0 in the final pool, so the loop will always terminate
qstr_pool_t *pool = MP_STATE_VM(last_pool);
while (q < pool->total_prev_len) {
pool = pool->prev;
}
assert(q - pool->total_prev_len < pool->len);
return pool->qstrs[q - pool->total_prev_len];
}
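// Worked example (illustrative): suppose the const pool holds 200 entries
// (total_prev_len == 0) and one dynamic pool sits on top of it
// (total_prev_len == 200). Looking up q == 210 never enters the loop
// (210 >= 200) and returns dynamic_pool->qstrs[10]; q == 50 steps back once
// and returns const_pool->qstrs[50].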
// qstr_mutex must be taken while in this function
STATIC qstr qstr_add(const byte *q_ptr) {
DEBUG_printf("QSTR: add hash=%d len=%d data=%.*s\n", Q_GET_HASH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_DATA(q_ptr));
// make sure we have room in the pool for a new qstr
if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
uint32_t new_pool_length = MP_STATE_VM(last_pool)->alloc * 2;
if (new_pool_length > MICROPY_QSTR_POOL_MAX_ENTRIES) {
new_pool_length = MICROPY_QSTR_POOL_MAX_ENTRIES;
}
qstr_pool_t *pool = m_new_ll_obj_var_maybe(qstr_pool_t, const char*, new_pool_length);
if (pool == NULL) {
QSTR_EXIT();
m_malloc_fail(new_pool_length);
}
pool->prev = MP_STATE_VM(last_pool);
pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len;
pool->alloc = new_pool_length;
pool->len = 0;
MP_STATE_VM(last_pool) = pool;
DEBUG_printf("QSTR: allocate new pool of size %d\n", MP_STATE_VM(last_pool)->alloc);
}
// add the new qstr
MP_STATE_VM(last_pool)->qstrs[MP_STATE_VM(last_pool)->len++] = q_ptr;
// return id for the newly-added qstr
return MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len - 1;
}
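// Growth pattern (a sketch derived from the code above): the const pool
// advertises an alloc of 10, so successive dynamic pools are sized
// 20, 40, 80, ... entries, doubling each time the current pool fills and
// capped at MICROPY_QSTR_POOL_MAX_ENTRIES.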
qstr qstr_find_strn(const char *str, size_t str_len) {
// work out hash of str
mp_uint_t str_hash = qstr_compute_hash((const byte*)str, str_len);
// search pools for the data
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
if (Q_GET_HASH(*q) == str_hash && Q_GET_LENGTH(*q) == str_len && memcmp(Q_GET_DATA(*q), str, str_len) == 0) {
return pool->total_prev_len + (q - pool->qstrs);
}
}
}
// not found; return null qstr
return 0;
}
qstr qstr_from_str(const char *str) {
return qstr_from_strn(str, strlen(str));
}
qstr qstr_from_strn(const char *str, size_t len) {
assert(len < (1 << (8 * MICROPY_QSTR_BYTES_IN_LEN)));
QSTR_ENTER();
qstr q = qstr_find_strn(str, len);
if (q == 0) {
// qstr does not exist in interned pool so need to add it
// compute number of bytes needed to intern this string
size_t n_bytes = MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len + 1;
if (MP_STATE_VM(qstr_last_chunk) != NULL && MP_STATE_VM(qstr_last_used) + n_bytes > MP_STATE_VM(qstr_last_alloc)) {
// not enough room at end of previously interned string so try to grow
byte *new_p = m_renew_maybe(byte, MP_STATE_VM(qstr_last_chunk), MP_STATE_VM(qstr_last_alloc), MP_STATE_VM(qstr_last_alloc) + n_bytes, false);
if (new_p == NULL) {
// could not grow existing memory; shrink it to fit previous
(void)m_renew_maybe(byte, MP_STATE_VM(qstr_last_chunk), MP_STATE_VM(qstr_last_alloc), MP_STATE_VM(qstr_last_used), false);
MP_STATE_VM(qstr_last_chunk) = NULL;
} else {
// could grow existing memory
MP_STATE_VM(qstr_last_alloc) += n_bytes;
}
}
if (MP_STATE_VM(qstr_last_chunk) == NULL) {
// no existing memory for the interned string so allocate a new chunk
size_t al = n_bytes;
if (al < MICROPY_ALLOC_QSTR_CHUNK_INIT) {
al = MICROPY_ALLOC_QSTR_CHUNK_INIT;
}
MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(byte, al);
if (MP_STATE_VM(qstr_last_chunk) == NULL) {
// failed to allocate a large chunk so try with exact size
MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(byte, n_bytes);
if (MP_STATE_VM(qstr_last_chunk) == NULL) {
QSTR_EXIT();
m_malloc_fail(n_bytes);
}
al = n_bytes;
}
MP_STATE_VM(qstr_last_alloc) = al;
MP_STATE_VM(qstr_last_used) = 0;
}
// allocate memory from the chunk for this new interned string's data
byte *q_ptr = MP_STATE_VM(qstr_last_chunk) + MP_STATE_VM(qstr_last_used);
MP_STATE_VM(qstr_last_used) += n_bytes;
// store the interned string's data
mp_uint_t hash = qstr_compute_hash((const byte*)str, len);
Q_SET_HASH(q_ptr, hash);
Q_SET_LENGTH(q_ptr, len);
memcpy(q_ptr + MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN, str, len);
q_ptr[MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len] = '\0';
q = qstr_add(q_ptr);
}
QSTR_EXIT();
return q;
}
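// Usage sketch (illustrative, not part of the original file): interning is
// idempotent, so qstr comparison reduces to integer equality.
//
//     qstr a = qstr_from_str("hello");
//     qstr b = qstr_from_strn("hello", 5);
//     assert(a == b);                                      // same pool entry, same id
//     assert(qstr_find_strn("never-interned", 14) == 0);   // 0 means "not found"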
mp_uint_t PLACE_IN_ITCM(qstr_hash)(qstr q) {
return Q_GET_HASH(find_qstr(q));
}
size_t qstr_len(qstr q) {
const byte *qd = find_qstr(q);
return Q_GET_LENGTH(qd);
}
const char *qstr_str(qstr q) {
const byte *qd = find_qstr(q);
return (const char*)Q_GET_DATA(qd);
}
const byte *qstr_data(qstr q, size_t *len) {
const byte *qd = find_qstr(q);
*len = Q_GET_LENGTH(qd);
return Q_GET_DATA(qd);
}
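// Usage sketch (illustrative): the accessors above are consistent views of
// the same stored bytes.
//
//     size_t len;
//     const byte *data = qstr_data(q, &len);
//     assert(data == (const byte *)qstr_str(q));
//     assert(len == qstr_len(q));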
void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, size_t *n_total_bytes) {
QSTR_ENTER();
*n_pool = 0;
*n_qstr = 0;
*n_str_data_bytes = 0;
*n_total_bytes = 0;
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &CONST_POOL; pool = pool->prev) {
*n_pool += 1;
*n_qstr += pool->len;
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
*n_str_data_bytes += Q_GET_ALLOC(*q);
}
#if MICROPY_ENABLE_GC
*n_total_bytes += gc_nbytes(pool); // this counts actual bytes used in heap
#else
*n_total_bytes += sizeof(qstr_pool_t) + sizeof(qstr) * pool->alloc;
#endif
}
*n_total_bytes += *n_str_data_bytes;
QSTR_EXIT();
}
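// Usage sketch (illustrative): the kind of call a memory-info routine makes
// to report interning overhead.
//
//     size_t n_pool, n_qstr, n_str_bytes, n_total;
//     qstr_pool_info(&n_pool, &n_qstr, &n_str_bytes, &n_total);
//     mp_printf(&mp_plat_print, "pools=%u qstrs=%u total=%u bytes\n",
//               (unsigned)n_pool, (unsigned)n_qstr, (unsigned)n_total);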
#if MICROPY_PY_MICROPYTHON_MEM_INFO
void qstr_dump_data(void) {
QSTR_ENTER();
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &CONST_POOL; pool = pool->prev) {
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
mp_printf(&mp_plat_print, "Q(%s)\n", Q_GET_DATA(*q));
}
}
QSTR_EXIT();
}
#endif