"""
Process raw qstr file and output qstr data with length, hash and data bytes.
This script works with Python 2.7, 3.3 and 3.4.
For documentation about the format of compressed translated strings, see
supervisor/shared/translate.h
"""
from __future__ import print_function
import re
import sys
import collections
import gettext
import os.path
if hasattr(sys.stdout, "reconfigure"):
sys.stdout.reconfigure(encoding="utf-8")
sys.stderr.reconfigure(errors="backslashreplace")
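# Note: reconfigure() on sys.stdout/sys.stderr is only available on Python 3.7
# and newer, hence the hasattr() guard above.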
py = os.path.dirname(sys.argv[0])
top = os.path.dirname(py)
sys.path.append(os.path.join(top, "tools/huffman"))
import huffman
# Python 2/3 compatibility:
# - iterating through bytes is different
# - codepoint2name lives in a different module
import platform
if platform.python_version_tuple()[0] == "2":
bytes_cons = lambda val, enc=None: bytearray(val)
from htmlentitydefs import codepoint2name
elif platform.python_version_tuple()[0] == "3":
bytes_cons = bytes
from html.entities import codepoint2name
# end compatibility code
codepoint2name[ord("-")] = "hyphen"
# add some custom names to map characters that aren't in HTML
codepoint2name[ord(" ")] = "space"
codepoint2name[ord("'")] = "squot"
codepoint2name[ord(",")] = "comma"
codepoint2name[ord(".")] = "dot"
codepoint2name[ord(":")] = "colon"
codepoint2name[ord(";")] = "semicolon"
codepoint2name[ord("/")] = "slash"
codepoint2name[ord("%")] = "percent"
codepoint2name[ord("#")] = "hash"
codepoint2name[ord("(")] = "paren_open"
codepoint2name[ord(")")] = "paren_close"
codepoint2name[ord("[")] = "bracket_open"
codepoint2name[ord("]")] = "bracket_close"
codepoint2name[ord("{")] = "brace_open"
codepoint2name[ord("}")] = "brace_close"
codepoint2name[ord("*")] = "star"
codepoint2name[ord("!")] = "bang"
codepoint2name[ord("\\")] = "backslash"
codepoint2name[ord("+")] = "plus"
codepoint2name[ord("$")] = "dollar"
codepoint2name[ord("=")] = "equals"
codepoint2name[ord("?")] = "question"
codepoint2name[ord("@")] = "at_sign"
codepoint2name[ord("^")] = "caret"
codepoint2name[ord("|")] = "pipe"
codepoint2name[ord("~")] = "tilde"
C_ESCAPES = {
"\a": "\\a",
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
"\v": "\\v",
"'": "\\'",
'"': '\\"',
}
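# translate() below applies this table in reverse (escape sequence -> raw
# character) to recover the original strings, and compute_huffman_coding()
# uses it to print control characters readably in its debug output.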
# this must match the equivalent function in qstr.c
def compute_hash(qstr, bytes_hash):
hash = 5381
for b in qstr:
hash = (hash * 33) ^ b
    # Make sure that a valid hash is never zero; zero means "hash not computed".
return (hash & ((1 << (8 * bytes_hash)) - 1)) or 1
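# Illustrative example: with empty input the loop body never runs, so
# compute_hash(b"", 1) == 5381 & 0xFF == 5; the final mask truncates the hash
# to bytes_hash bytes, and a result of 0 is remapped to 1.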
def translate(translation_file, i18ns):
with open(translation_file, "rb") as f:
table = gettext.GNUTranslations(f)
translations = []
for original in i18ns:
unescaped = original
for s in C_ESCAPES:
unescaped = unescaped.replace(C_ESCAPES[s], s)
translation = table.gettext(unescaped)
        # Add carriage returns so that the text displays correctly in terminals.
translation = translation.replace("\n", "\r\n")
translations.append((original, translation))
return translations
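# Illustrative example (hypothetical catalog): if the .mo file maps "Hello\n"
# to "Hallo\n", then translate(mo_path, ['Hello\\n']) unescapes the C-style
# '\\n' back to a real newline, looks up the translation, rewrites newlines as
# "\r\n" and returns [('Hello\\n', 'Hallo\r\n')].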
class TextSplitter:
def __init__(self, words):
words.sort(key=lambda x: len(x), reverse=True)
self.words = set(words)
if words:
pat = "|".join(re.escape(w) for w in words) + "|."
else:
pat = "."
self.pat = re.compile(pat, flags=re.DOTALL)
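        # Because words were sorted longest-first above and Python's "|"
        # alternation takes the first alternative that matches, the longest
        # dictionary word wins at any position; the trailing "." falls back to
        # a single character (including newlines, thanks to re.DOTALL).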
def iter_words(self, text):
s = []
words = self.words
for m in self.pat.finditer(text):
t = m.group(0)
if t in words:
if s:
yield (False, "".join(s))
s = []
yield (True, t)
else:
s.append(t)
if s:
yield (False, "".join(s))
def iter(self, text):
for m in self.pat.finditer(text):
yield m.group(0)
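# Illustrative example: with TextSplitter(["the"]), iter_words("in the hat")
# yields (False, "in "), (True, "the"), (False, " hat") -- dictionary words are
# flagged True and the text between them is grouped into False chunks.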
def iter_substrings(s, minlen, maxlen):
len_s = len(s)
maxlen = min(len_s, maxlen)
for n in range(minlen, maxlen + 1):
for begin in range(0, len_s - n + 1):
yield s[begin : begin + n]
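# Illustrative example: iter_substrings("abcd", 2, 3) yields "ab", "bc", "cd",
# "abc", "bcd" -- every substring of length 2, then every substring of length 3.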
def compute_huffman_coding(translations, compression_filename):
texts = [t[1] for t in translations]
words = []
start_unused = 0x80
end_unused = 0xFF
max_ord = 0
for text in texts:
for c in text:
ord_c = ord(c)
max_ord = max(ord_c, max_ord)
if 0x80 <= ord_c < 0xFF:
end_unused = min(ord_c, end_unused)
max_words = end_unused - 0x80
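    # Code points 0x80..end_unused-1 never occur in the translated texts, so
    # they are free to serve as single-byte codes for dictionary words;
    # max_words is simply the size of that unused range.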
values_type = "uint16_t" if max_ord > 255 else "uint8_t"
max_words_len = 160 if max_ord > 255 else 255
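    # If any code point above 255 appears, the values/words tables must be
    # emitted as uint16_t (doubling the per-entry cost), and the character
    # budget for dictionary words is reduced from 255 to 160.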
sum_len = 0
while True:
# Until the dictionary is filled to capacity, use a heuristic to find
# the best "word" (3- to 9-gram) to add to it.
#
# The TextSplitter allows us to avoid considering parts of the text
# that are already covered by a previously chosen word, for example
# if "the" is in words then not only will "the" not be considered
# again, neither will "there" or "wither", since they have "the"
# as substrings.
extractor = TextSplitter(words)
counter = collections.Counter()
for t in texts:
for (found, word) in extractor.iter_words(t):
if not found:
for substr in iter_substrings(word, minlen=3, maxlen=9):
counter[substr] += 1
# Score the candidates we found. This is an empirical formula only,
# chosen for its effectiveness.
scores = sorted(
((s, (len(s) - 1) ** (occ + 4)) for (s, occ) in counter.items() if occ > 4),
key=lambda x: x[1],
reverse=True,
)
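        # Illustrative example of the scoring: a 4-character substring seen 5
        # times scores (4 - 1) ** (5 + 4) = 19683, while substrings seen 4
        # times or fewer are skipped entirely (occ > 4).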
# Pick the one with the highest score.
if not scores:
break
word = scores[0][0]
# If we can successfully add it to the dictionary, do so. Otherwise,
# we've filled the dictionary to capacity and are done.
if sum_len + len(word) - 2 > max_words_len:
break
if len(words) == max_words:
break
words.append(word)
sum_len += len(word) - 2
extractor = TextSplitter(words)
counter = collections.Counter()
for t in texts:
for atom in extractor.iter(t):
counter[atom] += 1
cb = huffman.codebook(counter.items())
word_start = start_unused
word_end = word_start + len(words) - 1
print("// # words", len(words))
print("// words", words)
values = []
length_count = {}
renumbered = 0
last_length = None
canonical = {}
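    # The loop below assigns canonical Huffman codes: atoms are visited in
    # order of increasing code length, each code is the previous code plus
    # one, and the running value is shifted left whenever the code length
    # grows. Illustrative example: code lengths [2, 2, 3, 3] produce the codes
    # "00", "01", "100", "101".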
for atom, code in sorted(cb.items(), key=lambda x: (len(x[1]), x[0])):
values.append(atom)
length = len(code)
if length not in length_count:
length_count[length] = 0
length_count[length] += 1
if last_length:
renumbered <<= length - last_length
canonical[atom] = "{0:0{width}b}".format(renumbered, width=length)
# print(f"atom={repr(atom)} code={code}", file=sys.stderr)
if len(atom) > 1:
o = words.index(atom) + 0x80
s = "".join(C_ESCAPES.get(ch1, ch1) for ch1 in atom)
else:
s = C_ESCAPES.get(atom, atom)
o = ord(atom)
print("//", o, s, counter[atom], canonical[atom], renumbered)
renumbered += 1
last_length = length
lengths = bytearray()
print("// length count", length_count)
for i in range(1, max(length_count) + 2):
lengths.append(length_count.get(i, 0))
print("// values", values, "lengths", len(lengths), lengths)
print("//", values, lengths)
values = [(atom if len(atom) == 1 else chr(0x80 + words.index(atom))) for atom in values]
print("//", values, lengths)
max_translation_encoded_length = max(
len(translation.encode("utf-8")) for (original, translation) in translations
)
wends = list(len(w) - 2 for w in words)
for i in range(1, len(wends)):
wends[i] += wends[i - 1]
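    # wends now holds the cumulative (len(word) - 2) word-end offsets that are
    # written out below as the wends[] table. Illustrative example: words of
    # lengths 3, 4 and 5 give wends == [1, 3, 6].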
with open(compression_filename, "w") as f:
f.write("const uint8_t lengths[] = {{ {} }};\n".format(", ".join(map(str, lengths))))
|
2021-03-15 09:57:36 -04:00
|
|
|
f.write(
|
|
|
|
"const {} values[] = {{ {} }};\n".format(
|
|
|
|
values_type, ", ".join(str(ord(u)) for u in values)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
f.write(
|
|
|
|
"#define compress_max_length_bits ({})\n".format(
|
|
|
|
max_translation_encoded_length.bit_length()
|
|
|
|
)
|
|
|
|
)
|
|
|
|
f.write(
|
|
|
|
"const {} words[] = {{ {} }};\n".format(
|
|
|
|
values_type, ", ".join(str(ord(c)) for w in words for c in w)
|
|
|
|
)
|
|
|
|
)
|
2020-09-13 12:25:13 -04:00
|
|
|
f.write("const uint8_t wends[] = {{ {} }};\n".format(", ".join(str(p) for p in wends)))
|
2020-09-12 11:10:18 -04:00
|
|
|
f.write("#define word_start {}\n".format(word_start))
|
|
|
|
f.write("#define word_end {}\n".format(word_end))
|
|
|
|
|
2020-09-13 12:25:13 -04:00
|
|
|
return (values, lengths, words, canonical, extractor)
|
2018-08-15 21:32:37 -04:00
|
|
|
|
2021-03-15 09:57:36 -04:00
|
|
|
|
2020-05-28 08:40:56 -04:00
|
|
|
def decompress(encoding_table, encoded, encoded_length_bits):
|
2020-09-13 12:25:13 -04:00
|
|
|
(values, lengths, words, _, _) = encoding_table
|
2019-12-02 09:41:03 -05:00
|
|
|
dec = []
|
2018-08-15 21:32:37 -04:00
|
|
|
this_byte = 0
|
|
|
|
this_bit = 7
|
|
|
|
b = encoded[this_byte]
|
2020-05-28 08:40:56 -04:00
|
|
|
bits = 0
|
|
|
|
for i in range(encoded_length_bits):
|
|
|
|
bits <<= 1
|
|
|
|
if 0x80 & b:
|
|
|
|
bits |= 1
|
|
|
|
|
|
|
|
b <<= 1
|
|
|
|
if this_bit == 0:
|
|
|
|
this_bit = 7
|
|
|
|
this_byte += 1
|
|
|
|
if this_byte < len(encoded):
|
|
|
|
b = encoded[this_byte]
|
|
|
|
else:
|
|
|
|
this_bit -= 1
|
|
|
|
length = bits
|
|
|
|
|
|
|
|
i = 0
|
|
|
|
while i < length:
|
2018-08-15 21:32:37 -04:00
|
|
|
bits = 0
|
|
|
|
bit_length = 0
|
|
|
|
max_code = lengths[0]
|
|
|
|
searched_length = lengths[0]
|
|
|
|
while True:
|
|
|
|
bits <<= 1
|
|
|
|
if 0x80 & b:
|
|
|
|
bits |= 1
|
|
|
|
|
|
|
|
b <<= 1
|
|
|
|
bit_length += 1
|
|
|
|
if this_bit == 0:
|
|
|
|
this_bit = 7
|
|
|
|
this_byte += 1
|
|
|
|
if this_byte < len(encoded):
|
|
|
|
b = encoded[this_byte]
|
|
|
|
else:
|
|
|
|
this_bit -= 1
|
|
|
|
if max_code > 0 and bits < max_code:
|
2021-03-15 09:57:36 -04:00
|
|
|
# print('{0:0{width}b}'.format(bits, width=bit_length))
|
2018-08-15 21:32:37 -04:00
|
|
|
break
|
|
|
|
max_code = (max_code << 1) + lengths[bit_length]
|
|
|
|
searched_length += lengths[bit_length]
|
|
|
|
|
|
|
|
v = values[searched_length + bits - max_code]
|
2020-09-12 11:10:18 -04:00
|
|
|
if v >= chr(0x80) and v < chr(0x80 + len(words)):
|
|
|
|
v = words[ord(v) - 0x80]
|
2021-03-15 09:57:36 -04:00
|
|
|
i += len(v.encode("utf-8"))
|
2019-12-02 09:41:03 -05:00
|
|
|
dec.append(v)
|
2021-03-15 09:57:36 -04:00
|
|
|
return "".join(dec)
|
|
|
|
|
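The symbol lookup in the loop above is a canonical Huffman decode: judging from the loop, lengths[k] counts the symbols whose code is k+1 bits long, and a code is accepted as soon as its accumulated value drops below the running max_code. A self-contained sketch with a tiny hand-built codebook (the codebook is invented for illustration):

def canonical_decode(bitstring, lengths, values):
    # Mirrors the inner loop of decompress(): consume bits until the
    # accumulated code is valid, then index into the values table.
    bits = 0
    bit_length = 0
    max_code = lengths[0]
    searched_length = lengths[0]
    pos = 0
    while True:
        bits = (bits << 1) | int(bitstring[pos])
        pos += 1
        bit_length += 1
        if max_code > 0 and bits < max_code:
            return values[searched_length + bits - max_code], pos
        max_code = (max_code << 1) + lengths[bit_length]
        searched_length += lengths[bit_length]

# Codebook: "a" -> 0, "b" -> 10, "c" -> 11
example_lengths = [1, 2]          # one 1-bit code, two 2-bit codes
example_values = ["a", "b", "c"]  # sorted by code length, then code value
assert canonical_decode("0", example_lengths, example_values) == ("a", 1)
assert canonical_decode("10", example_lengths, example_values) == ("b", 2)
assert canonical_decode("11", example_lengths, example_values) == ("c", 2)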
2018-08-15 21:32:37 -04:00
|
|
|
|
2020-05-28 08:40:56 -04:00
|
|
|
def compress(encoding_table, decompressed, encoded_length_bits, len_translation_encoded):
|
2019-12-02 09:41:03 -05:00
|
|
|
if not isinstance(decompressed, str):
|
2018-08-15 21:32:37 -04:00
|
|
|
raise TypeError()
|
2020-09-13 12:25:13 -04:00
|
|
|
(_, _, _, canonical, extractor) = encoding_table
|
2020-09-12 11:10:18 -04:00
|
|
|
|
2019-12-02 09:41:03 -05:00
|
|
|
enc = bytearray(len(decompressed) * 3)
|
2018-08-15 21:32:37 -04:00
|
|
|
current_bit = 7
|
|
|
|
current_byte = 0
|
2020-05-28 08:40:56 -04:00
|
|
|
|
2020-09-13 12:25:13 -04:00
|
|
|
bits = encoded_length_bits + 1
|
2020-05-28 08:40:56 -04:00
|
|
|
for i in range(bits - 1, 0, -1):
|
|
|
|
if len_translation_encoded & (1 << (i - 1)):
|
|
|
|
enc[current_byte] |= 1 << current_bit
|
|
|
|
if current_bit == 0:
|
|
|
|
current_bit = 7
|
|
|
|
current_byte += 1
|
|
|
|
else:
|
|
|
|
current_bit -= 1
|
|
|
|
|
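The loop above writes the message's decoded length (in UTF-8 bytes) as a fixed-width, most-significant-bit-first prefix of encoded_length_bits bits, and decompress() reads the same prefix back before decoding any symbols. A standalone sketch of just that framing:

def pack_length_prefix(length, encoded_length_bits):
    # MSB-first fixed-width prefix, matching the loop in compress() above.
    return [(length >> i) & 1 for i in range(encoded_length_bits - 1, -1, -1)]

def unpack_length_prefix(bits):
    # The reverse operation, as performed at the start of decompress().
    value = 0
    for bit in bits:
        value = (value << 1) | bit
    return value

assert pack_length_prefix(5, 4) == [0, 1, 0, 1]
assert unpack_length_prefix(pack_length_prefix(55, 8)) == 55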
2020-09-12 11:10:18 -04:00
|
|
|
for atom in extractor.iter(decompressed):
|
2020-09-13 12:25:13 -04:00
|
|
|
for b in canonical[atom]:
|
|
|
|
if b == "1":
|
2018-08-15 21:32:37 -04:00
|
|
|
enc[current_byte] |= 1 << current_bit
|
|
|
|
if current_bit == 0:
|
|
|
|
current_bit = 7
|
|
|
|
current_byte += 1
|
|
|
|
else:
|
|
|
|
current_bit -= 1
|
2020-09-13 12:25:13 -04:00
|
|
|
|
2018-08-15 21:32:37 -04:00
|
|
|
if current_bit != 7:
|
|
|
|
current_byte += 1
|
|
|
|
return enc[:current_byte]
|
|
|
|
|
2021-03-15 09:57:36 -04:00
|
|
|
|
2016-01-31 07:59:59 -05:00
|
|
|
def qstr_escape(qst):
|
2016-04-13 17:12:39 -04:00
|
|
|
def esc_char(m):
|
|
|
|
c = ord(m.group(0))
|
|
|
|
try:
|
|
|
|
name = codepoint2name[c]
|
|
|
|
except KeyError:
|
2021-03-15 09:57:36 -04:00
|
|
|
name = "0x%02x" % c
|
|
|
|
return "_" + name + "_"
|
|
|
|
|
|
|
|
return re.sub(r"[^A-Za-z0-9_]", esc_char, qst)
|
|
|
|
|
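A few examples of what qstr_escape() produces, using the codepoint2name table set up near the top of this script:

assert qstr_escape("foo_bar") == "foo_bar"        # identifier characters pass through
assert qstr_escape("foo bar") == "foo_space_bar"  # named escape from codepoint2name
assert qstr_escape("*") == "_star_"
assert qstr_escape("\x01") == "_0x01_"            # fallback hex escape for unnamed characters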
2016-01-31 07:59:59 -05:00
|
|
|
|
|
|
|
def parse_input_headers(infiles):
|
2014-01-21 16:40:13 -05:00
|
|
|
# read the qstrs in from the input files
|
2015-01-11 12:52:45 -05:00
|
|
|
qcfgs = {}
|
2014-01-23 17:22:00 -05:00
|
|
|
qstrs = {}
|
2018-07-31 19:53:54 -04:00
|
|
|
i18ns = set()
|
2014-01-21 16:40:13 -05:00
|
|
|
for infile in infiles:
|
2021-03-15 09:57:36 -04:00
|
|
|
with open(infile, "rt") as f:
|
2014-01-21 16:40:13 -05:00
|
|
|
for line in f:
|
2015-01-11 12:52:45 -05:00
|
|
|
line = line.strip()
|
|
|
|
|
|
|
|
# is this a config line?
|
2021-03-15 09:57:36 -04:00
|
|
|
match = re.match(r"^QCFG\((.+), (.+)\)", line)
|
2015-01-11 12:52:45 -05:00
|
|
|
if match:
|
|
|
|
value = match.group(2)
|
2021-03-15 09:57:36 -04:00
|
|
|
if value[0] == "(" and value[-1] == ")":
|
2015-01-11 12:52:45 -05:00
|
|
|
# strip parentheses from the config value
|
|
|
|
value = value[1:-1]
|
|
|
|
qcfgs[match.group(1)] = value
|
|
|
|
continue
|
|
|
|
|
2018-07-31 19:53:54 -04:00
|
|
|
match = re.match(r'^TRANSLATE\("(.*)"\)$', line)
|
|
|
|
if match:
|
|
|
|
i18ns.add(match.group(1))
|
|
|
|
continue
|
|
|
|
|
2014-05-02 15:10:47 -04:00
|
|
|
# is this a QSTR line?
|
2021-03-15 09:57:36 -04:00
|
|
|
match = re.match(r"^Q\((.*)\)$", line)
|
2014-05-02 15:10:47 -04:00
|
|
|
if not match:
|
2014-04-13 08:16:51 -04:00
|
|
|
continue
|
2014-01-21 16:40:13 -05:00
|
|
|
|
|
|
|
# get the qstr value
|
|
|
|
qstr = match.group(1)
|
2016-04-14 10:22:36 -04:00
|
|
|
|
|
|
|
# special case to specify control characters
|
2021-03-15 09:57:36 -04:00
|
|
|
if qstr == "\\n":
|
|
|
|
qstr = "\n"
|
2016-04-14 10:22:36 -04:00
|
|
|
|
|
|
|
# work out the corresponding qstr name
|
2016-01-31 07:59:59 -05:00
|
|
|
ident = qstr_escape(qstr)
|
2014-01-21 16:40:13 -05:00
|
|
|
|
|
|
|
# don't add duplicates
|
2014-01-23 17:22:00 -05:00
|
|
|
if ident in qstrs:
|
2014-01-21 16:40:13 -05:00
|
|
|
continue
|
|
|
|
|
2014-01-24 17:22:00 -05:00
|
|
|
# add the qstr to the list, with order number to retain original order in file
|
2017-10-21 04:06:32 -04:00
|
|
|
order = len(qstrs)
|
|
|
|
# but put special method names like __add__ at the top of the list, so
|
|
|
|
# that their IDs fit into a byte
|
|
|
|
if ident == "":
|
|
|
|
# Still sort the empty qstr above everything else
|
|
|
|
order = -200000
|
2018-05-10 09:10:46 -04:00
|
|
|
elif ident == "__dir__":
|
|
|
|
# Put __dir__ after empty qstr for builtin dir() to work
|
|
|
|
order = -190000
|
2017-10-21 04:06:32 -04:00
|
|
|
elif ident.startswith("__"):
|
|
|
|
order -= 100000
|
|
|
|
qstrs[ident] = (order, ident, qstr)
|
2014-01-21 16:40:13 -05:00
|
|
|
|
2018-07-31 19:53:54 -04:00
|
|
|
if not qcfgs and qstrs:
|
2015-10-11 04:09:57 -04:00
|
|
|
sys.stderr.write("ERROR: Empty preprocessor output - check for errors above\n")
|
|
|
|
sys.exit(1)
|
|
|
|
|
2018-07-31 19:53:54 -04:00
|
|
|
return qcfgs, qstrs, i18ns
|
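A hypothetical round trip of parse_input_headers() (the file and its contents are invented for illustration): the preprocessed input contains QCFG(), TRANSLATE() and Q() lines, which end up in qcfgs, i18ns and qstrs respectively.

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".qstr", delete=False) as f:
    f.write("QCFG(BYTES_IN_LEN, (1))\n")
    f.write('TRANSLATE("Hello world")\n')
    f.write("Q(append)\n")
    tmp_name = f.name

qcfgs, qstrs, i18ns = parse_input_headers([tmp_name])
os.unlink(tmp_name)
assert qcfgs["BYTES_IN_LEN"] == "1"  # surrounding parentheses are stripped
assert "Hello world" in i18ns        # collected for translation
assert "append" in qstrs             # keyed by the escaped identifier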
2016-01-31 07:59:59 -05:00
|
|
|
|
2021-04-06 12:33:03 -04:00
|
|
|
def escape_bytes(qstr):
|
|
|
|
if all(32 <= ord(c) <= 126 and c != "\\" and c != '"' for c in qstr):
|
|
|
|
# qstr is all printable ASCII so render it as-is (for easier debugging)
|
|
|
|
return qstr
|
|
|
|
else:
|
|
|
|
# qstr contains non-printable codes so render entire thing as hex pairs
|
|
|
|
qbytes = bytes_cons(qstr, "utf8")
|
|
|
|
return "".join(("\\x%02x" % b) for b in qbytes)
|
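Quick checks of escape_bytes() behaviour: printable ASCII is emitted verbatim, while anything containing quotes, backslashes or non-ASCII is rendered entirely as \xNN hex pairs.

assert escape_bytes("hello") == "hello"
assert escape_bytes("\u03c0") == "\\xcf\\x80"    # 'π' becomes its UTF-8 bytes
assert escape_bytes('a"b') == "\\x61\\x22\\x62"  # an embedded quote forces the hex form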
2021-03-15 09:57:36 -04:00
|
|
|
|
2016-01-31 07:59:59 -05:00
|
|
|
def make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr):
|
2021-03-15 09:57:36 -04:00
|
|
|
qbytes = bytes_cons(qstr, "utf8")
|
2016-09-02 00:32:47 -04:00
|
|
|
qlen = len(qbytes)
|
|
|
|
qhash = compute_hash(qbytes, cfg_bytes_hash)
|
2016-01-31 07:59:59 -05:00
|
|
|
if qlen >= (1 << (8 * cfg_bytes_len)):
|
2021-03-15 09:57:36 -04:00
|
|
|
print("qstr is too long:", qstr)
|
2016-01-31 07:59:59 -05:00
|
|
|
assert False
|
2021-04-06 12:33:03 -04:00
|
|
|
qdata = escape_bytes(qstr)
|
|
|
|
return '%d, %d, "%s"' % (qhash, qlen, qdata)
|
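A small usage note for make_bytes(), relying on compute_hash() as defined earlier in this script: it renders one QDEF payload as 'hash, length, "data"', where the length counts UTF-8 bytes.

payload = make_bytes(1, 1, "hello")        # 1-byte length field, 1-byte hash field
qhash, qlen, qdata = payload.split(", ", 2)
assert qlen == "5" and qdata == '"hello"'  # the hash value depends on compute_hash()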
2016-01-31 07:59:59 -05:00
|
|
|
|
2021-03-15 09:57:36 -04:00
|
|
|
|
2018-08-15 21:32:37 -04:00
|
|
|
def print_qstr_data(encoding_table, qcfgs, qstrs, i18ns):
|
2015-01-11 17:27:30 -05:00
|
|
|
# get config variables
|
2021-03-15 09:57:36 -04:00
|
|
|
cfg_bytes_len = int(qcfgs["BYTES_IN_LEN"])
|
|
|
|
cfg_bytes_hash = int(qcfgs["BYTES_IN_HASH"])
|
2015-01-11 17:27:30 -05:00
|
|
|
|
2015-07-31 07:57:36 -04:00
|
|
|
# print out the starter of the generated C header file
|
2021-03-15 09:57:36 -04:00
|
|
|
print("// This file was automatically generated by makeqstrdata.py")
|
|
|
|
print("")
|
2015-01-11 17:27:30 -05:00
|
|
|
|
2015-01-11 12:52:45 -05:00
|
|
|
# add NULL qstr with no hash or data
|
2021-04-06 12:33:03 -04:00
|
|
|
print('QDEF(MP_QSTR_NULL, 0, 0, "")')
|
2015-01-11 17:27:30 -05:00
|
|
|
|
2018-07-31 19:53:54 -04:00
|
|
|
total_qstr_size = 0
|
2018-08-15 21:32:37 -04:00
|
|
|
total_qstr_compressed_size = 0
|
2015-01-11 17:27:30 -05:00
|
|
|
# go through each qstr and print it out
|
2014-04-11 13:36:08 -04:00
|
|
|
for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
|
2016-01-31 07:59:59 -05:00
|
|
|
qbytes = make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr)
|
2021-03-15 09:57:36 -04:00
|
|
|
print("QDEF(MP_QSTR_%s, %s)" % (ident, qbytes))
|
2018-07-31 19:53:54 -04:00
|
|
|
total_qstr_size += len(qstr)
|
|
|
|
|
|
|
|
total_text_size = 0
|
2018-08-15 21:32:37 -04:00
|
|
|
total_text_compressed_size = 0
|
2021-03-15 09:57:36 -04:00
|
|
|
max_translation_encoded_length = max(
|
|
|
|
len(translation.encode("utf-8")) for original, translation in i18ns
|
|
|
|
)
|
2020-05-28 08:40:56 -04:00
|
|
|
encoded_length_bits = max_translation_encoded_length.bit_length()
|
2018-07-31 19:53:54 -04:00
|
|
|
for original, translation in i18ns:
|
2018-08-15 21:32:37 -04:00
|
|
|
translation_encoded = translation.encode("utf-8")
|
2021-03-15 09:57:36 -04:00
|
|
|
compressed = compress(
|
|
|
|
encoding_table, translation, encoded_length_bits, len(translation_encoded)
|
|
|
|
)
|
2018-08-15 21:32:37 -04:00
|
|
|
total_text_compressed_size += len(compressed)
|
2020-05-28 08:40:56 -04:00
|
|
|
decompressed = decompress(encoding_table, compressed, encoded_length_bits)
|
|
|
|
assert decompressed == translation
|
2018-08-15 21:32:37 -04:00
|
|
|
for c in C_ESCAPES:
|
2018-11-09 19:41:08 -05:00
|
|
|
decompressed = decompressed.replace(c, C_ESCAPES[c])
|
2021-03-15 09:57:36 -04:00
|
|
|
print(
|
|
|
|
'TRANSLATION("{}", {}) // {}'.format(
|
|
|
|
original, ", ".join(["{:d}".format(x) for x in compressed]), decompressed
|
|
|
|
)
|
|
|
|
)
|
2018-08-15 21:32:37 -04:00
|
|
|
total_text_size += len(translation.encode("utf-8"))
|
2016-01-31 07:59:59 -05:00
|
|
|
|
2018-07-31 19:53:54 -04:00
|
|
|
print()
|
|
|
|
print("// {} bytes worth of qstr".format(total_qstr_size))
|
|
|
|
print("// {} bytes worth of translations".format(total_text_size))
|
2018-08-15 21:32:37 -04:00
|
|
|
print("// {} bytes worth of translations compressed".format(total_text_compressed_size))
|
|
|
|
print("// {} bytes saved".format(total_text_size - total_text_compressed_size))
|
2018-07-31 19:53:54 -04:00
|
|
|
|
2021-03-15 09:57:36 -04:00
|
|
|
|
2018-07-31 19:53:54 -04:00
|
|
|
def print_qstr_enums(qstrs):
|
|
|
|
# print out the starter of the generated C header file
|
2021-03-15 09:57:36 -04:00
|
|
|
print("// This file was automatically generated by makeqstrdata.py")
|
|
|
|
print("")
|
2018-07-31 19:53:54 -04:00
|
|
|
|
|
|
|
# add NULL qstr with no hash or data
|
2021-03-15 09:57:36 -04:00
|
|
|
print("QENUM(MP_QSTR_NULL)")
|
2018-07-31 19:53:54 -04:00
|
|
|
|
|
|
|
# go through each qstr and print it out
|
|
|
|
for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
|
2021-03-15 09:57:36 -04:00
|
|
|
print("QENUM(MP_QSTR_%s)" % (ident,))
|
|
|
|
|
2014-01-21 16:40:13 -05:00
|
|
|
|
|
|
|
if __name__ == "__main__":
|
2018-07-31 19:53:54 -04:00
|
|
|
import argparse
|
|
|
|
|
2021-03-15 09:57:36 -04:00
|
|
|
parser = argparse.ArgumentParser(
|
|
|
|
description="Process QSTR definitions into headers for compilation"
|
|
|
|
)
|
|
|
|
parser.add_argument(
|
|
|
|
"infiles", metavar="N", type=str, nargs="+", help="an integer for the accumulator"
|
|
|
|
)
|
|
|
|
parser.add_argument(
|
|
|
|
"--translation", default=None, type=str, help="translations for i18n() items"
|
|
|
|
)
|
|
|
|
parser.add_argument(
|
|
|
|
"--compression_filename", default=None, type=str, help="header for compression info"
|
|
|
|
)
|
2018-07-31 19:53:54 -04:00
|
|
|
|
|
|
|
args = parser.parse_args()
|
|
|
|
|
|
|
|
qcfgs, qstrs, i18ns = parse_input_headers(args.infiles)
|
|
|
|
if args.translation:
|
2020-05-28 08:40:56 -04:00
|
|
|
i18ns = sorted(i18ns)
|
2018-07-31 19:53:54 -04:00
|
|
|
translations = translate(args.translation, i18ns)
|
2020-09-12 11:10:18 -04:00
|
|
|
encoding_table = compute_huffman_coding(translations, args.compression_filename)
|
2018-08-15 21:32:37 -04:00
|
|
|
print_qstr_data(encoding_table, qcfgs, qstrs, translations)
|
2018-07-31 19:53:54 -04:00
|
|
|
else:
|
|
|
|
print_qstr_enums(qstrs)
|