From 07740d19f3fd647979b223b8ffffcb6776c613e3 Mon Sep 17 00:00:00 2001
From: Jeff Epler
Date: Tue, 1 Sep 2020 17:12:22 -0500
Subject: [PATCH 1/6] add bigram compression to makeqstrdata

Compress common unicode bigrams by making code points in the range
0x80 - 0xbf (inclusive) represent them.  Then, they can be greedily
encoded and the substituted code points handled by the existing Huffman
compression.  Normally code points in the range 0x80-0xbf are not used
in Unicode, so we stake our own claim.  Using the more arguably correct
"Private Use Area" (PUA) would mean that for scripts that only use code
points under 256 we would use more memory for the "values" table.

bigram means "two letters", and is also sometimes called a "digram".
It's nothing to do with "big RAM".  For our purposes, a bigram
represents two successive unicode code points, so for instance in our
build on trinket m0 for English the most frequent are:
['t ', 'e ', 'in', 'd ', ...].

The bigrams are selected based on frequency in the corpus, but the
selection is not necessarily optimal, for these reasons I can think of:

 * Suppose the corpus was just "tea" repeated 100 times.  The top
   bigrams would be "te" and "ea".  However, due to overlap, "ea" could
   never be used once every "te" is substituted.  Thus, some bigrams
   might actually waste space.

 * I _assume_ this has to be why, e.g., bigram 0x86 "s " ends up more
   frequent after substitution than bigram 0x85 " a" (which ranked
   higher in the raw counts) in English for Trinket M0: sequences like
   "can't add" get the "t " digram and are then unable to use the " a"
   digram.

 * And generally, if a bigram is frequent then so are its constituents.
   Say that "i" and "n" both encode to just 5 or 6 bits; then the
   Huffman code for "in" had better compress to 10 or fewer bits or
   it's a net loss!

 * I checked though!  "i" is 5 bits, "n" is 6 bits (lucky guess), but
   the bigram 0x83 is also just 6 bits, so this one is a win of 5 bits
   for every "in", minus overhead.  Yay, this round goes to team
   compression.

 * On the other hand, the least frequent bigram 0x9d " n" is 10 bits
   long and its constituent code points are 4+6 bits, so there's no
   savings, but there is the cost of the table entry.

 * And somehow 0x9f 'an' is never used at all!

With or without accounting for overlaps, there is some optimum number
of bigrams.  Adding one more bigram uses at least 2 bytes (for the
entry in the bigram table; 4 bytes if code points >255 are in the
source text) and also needs a slot in the Huffman dictionary, so adding
bigrams beyond the optimum number makes compression worse again.

If it's an improvement, the fact that it's not guaranteed optimal
doesn't seem to matter too much.  It just leaves a little more fruit
for the next sweep to pick up.  Perhaps try adding the most frequent
bigram not yet present, until it doesn't improve compression overall.

Right now, de_DE is again the "fullest" build on trinket_m0.  (It's
reclaimed that spot from the ja translation somehow.)  This change
saves 104 bytes there, increasing free space by about 6.8%.  In the
larger (but not critically full) pyportal build it saves 324 bytes.

The specific number of bigrams used (32) was chosen as it is the max
number that fits within the 0x80..0xbf range.  Larger tables would
require the use of 16 bit code points in the de_DE build, losing
savings overall.
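To make the greedy selection and the overlap caveat concrete, here is a
small self-contained Python sketch (the counting helper mirrors
frequent_ngrams() from the patch below; the driver code under it is
illustrative only and not part of the patch):

    import collections

    def frequent_ngrams(corpus, sz, n):
        # Count every overlapping window of size sz; keep the n most common.
        return collections.Counter(
            corpus[i:i+sz] for i in range(len(corpus)-sz)).most_common(n)

    def encode(text, ngrams, start=0x80):
        # Greedy substitution: more frequent bigrams are substituted first.
        for i, g in enumerate(ngrams):
            text = text.replace(g, chr(start + i))
        return text

    corpus = "tea" * 100
    ngrams = [g for g, _ in frequent_ngrams(corpus, 2, 2)]   # ['te', 'ea']
    encoded = encode(corpus, ngrams)
    print(encoded.count(chr(0x80)))   # 100: every "te" is substituted
    print(encoded.count(chr(0x81)))   # 0: "ea" never gets a chance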
(Side note: The most frequent letters in English have been said to be
ETA OIN SHRDLU; but we have UAC EIL MOPRST in our corpus.)
---
 py/makeqstrdata.py            | 33 +++++++++++++++++++++++++++++----
 supervisor/shared/translate.c |  9 +++++++++
 2 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/py/makeqstrdata.py b/py/makeqstrdata.py
index 04c8938766..28aed3df97 100644
--- a/py/makeqstrdata.py
+++ b/py/makeqstrdata.py
@@ -100,9 +100,30 @@ def translate(translation_file, i18ns):
             translations.append((original, translation))
     return translations
 
+def frequent_ngrams(corpus, sz, n):
+    return collections.Counter(corpus[i:i+sz] for i in range(len(corpus)-sz)).most_common(n)
+
+def ngrams_to_pua(translation, ngrams):
+    if len(ngrams) > 32:
+        start = 0xe000
+    else:
+        start = 0x80
+    for i, g in enumerate(ngrams):
+        translation = translation.replace(g, chr(start + i))
+    return translation
+
+def pua_to_ngrams(compressed, ngrams):
+    if len(ngrams) > 32:
+        start, end = 0xe000, 0xf8ff
+    else:
+        start, end = 0x80, 0xbf
+    return "".join(ngrams[ord(c) - start] if (start <= ord(c) <= end) else c for c in compressed)
+
 def compute_huffman_coding(translations, qstrs, compression_filename):
     all_strings = [x[1] for x in translations]
     all_strings_concat = "".join(all_strings)
+    ngrams = [i[0] for i in frequent_ngrams(all_strings_concat, 2, 32)]
+    all_strings_concat = ngrams_to_pua(all_strings_concat, ngrams)
     counts = collections.Counter(all_strings_concat)
     cb = huffman.codebook(counts.items())
     values = []
@@ -128,7 +149,8 @@ def compute_huffman_coding(translations, qstrs, compression_filename):
     for i in range(1, max(length_count) + 2):
         lengths.append(length_count.get(i, 0))
     print("// values", values, "lengths", len(lengths), lengths)
-    print("// estimated total memory size", len(lengths) + 2*len(values) + sum(len(cb[u]) for u in all_strings_concat))
+    ngramdata = [ord(ni) for i in ngrams for ni in i]
+    print("// estimated total memory size", len(lengths) + 2*len(values) + 2 * len(ngramdata) + sum((len(cb[u]) + 7)//8 for u in all_strings_concat))
     print("//", values, lengths)
     values_type = "uint16_t" if max(ord(u) for u in values) > 255 else "uint8_t"
     max_translation_encoded_length = max(len(translation.encode("utf-8")) for original,translation in translations)
@@ -136,10 +158,11 @@ def compute_huffman_coding(translations, qstrs, compression_filename):
         f.write("const uint8_t lengths[] = {{ {} }};\n".format(", ".join(map(str, lengths))))
         f.write("const {} values[] = {{ {} }};\n".format(values_type, ", ".join(str(ord(u)) for u in values)))
         f.write("#define compress_max_length_bits ({})\n".format(max_translation_encoded_length.bit_length()))
-    return values, lengths
+        f.write("const {} ngrams[] = {{ {} }};\n".format(values_type, ", ".join(str(u) for u in ngramdata)))
+    return values, lengths, ngrams
 
 def decompress(encoding_table, encoded, encoded_length_bits):
-    values, lengths = encoding_table
+    values, lengths, ngrams = encoding_table
     dec = []
     this_byte = 0
     this_bit = 7
@@ -187,6 +210,7 @@ def decompress(encoding_table, encoded, encoded_length_bits):
             searched_length += lengths[bit_length]
 
         v = values[searched_length + bits - max_code]
+        v = pua_to_ngrams(v, ngrams)
         i += len(v.encode('utf-8'))
         dec.append(v)
     return ''.join(dec)
@@ -194,7 +218,8 @@
 def compress(encoding_table, decompressed, encoded_length_bits, len_translation_encoded):
     if not isinstance(decompressed, str):
         raise TypeError()
-    values, lengths = encoding_table
+    values, lengths, ngrams = encoding_table
+    decompressed = ngrams_to_pua(decompressed, ngrams)
     enc = bytearray(len(decompressed) * 3)
     #print(decompressed)
     #print(lengths)
diff --git a/supervisor/shared/translate.c b/supervisor/shared/translate.c
index 606f8fa91a..49ee8f143f 100644
--- a/supervisor/shared/translate.c
+++ b/supervisor/shared/translate.c
@@ -34,6 +34,7 @@
 #include "genhdr/compression.generated.h"
 #endif
 
+#include "py/misc.h"
 #include "supervisor/serial.h"
 
 void serial_write_compressed(const compressed_string_t* compressed) {
@@ -46,10 +47,18 @@ STATIC int put_utf8(char *buf, int u) {
     if(u <= 0x7f) {
         *buf = u;
         return 1;
+    } else if(MP_ARRAY_SIZE(ngrams) <= 64 && u <= 0xbf) {
+        int n = (u - 0x80) * 2;
+        int ret = put_utf8(buf, ngrams[n]);
+        return ret + put_utf8(buf + ret, ngrams[n+1]);
     } else if(u <= 0x07ff) {
         *buf++ = 0b11000000 | (u >> 6);
         *buf = 0b10000000 | (u & 0b00111111);
         return 2;
+    } else if(MP_ARRAY_SIZE(ngrams) > 64 && u >= 0xe000 && u <= 0xf8ff) {
+        int n = (u - 0xe000) * 2;
+        int ret = put_utf8(buf, ngrams[n]);
+        return ret + put_utf8(buf + ret, ngrams[n+1]);
     } else { // u <= 0xffff)
         *buf++ = 0b11000000 | (u >> 12);
         *buf = 0b10000000 | ((u >> 6) & 0b00111111);

From f1c7389b2951edc3e24091565b1465252ac40a1e Mon Sep 17 00:00:00 2001
From: Jeff Epler
Date: Wed, 2 Sep 2020 15:50:51 -0500
Subject: [PATCH 2/6] locales: Replace NBSP characters with true spaces

These characters, at code point 0xa0, are unintended.
---
 locale/cs.po | 6 +++---
 locale/pl.po | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/locale/cs.po b/locale/cs.po
index cd81c258fe..3e22ac2514 100644
--- a/locale/cs.po
+++ b/locale/cs.po
@@ -44,15 +44,15 @@ msgstr ""
 
 #: py/obj.c
 msgid " File \"%q\""
-msgstr "  Soubor \"%q\""
+msgstr " Soubor \"%q\""
 
 #: py/obj.c
 msgid " File \"%q\", line %d"
-msgstr "  Soubor \"%q\", řádek %d"
+msgstr " Soubor \"%q\", řádek %d"
 
 #: main.c
 msgid " output:\n"
-msgstr " výstup:\n"
+msgstr " výstup:\n"
 
 #: py/objstr.c
 #, c-format
diff --git a/locale/pl.po b/locale/pl.po
index b072629cd1..cd4b905e73 100644
--- a/locale/pl.po
+++ b/locale/pl.po
@@ -1971,7 +1971,7 @@ msgstr "wartość kalibracji poza zakresem +/-127"
 
 #: py/emitinlinethumb.c
 msgid "can only have up to 4 parameters to Thumb assembly"
-msgstr "asembler Thumb może przyjąć do 4 parameterów"
+msgstr "asembler Thumb może przyjąć do 4 parameterów"
 
 #: py/emitinlinextensa.c
 msgid "can only have up to 4 parameters to Xtensa assembly"
@@ -3562,7 +3562,7 @@ msgstr ""
 #~ msgstr "Nie udało się odkryć serwisów"
 
 #~ msgid "Failed to get local address"
-#~ msgstr "Nie udało się uzyskać lokalnego adresu"
+#~ msgstr "Nie udało się uzyskać lokalnego adresu"
 
 #~ msgid "Failed to get softdevice state"
 #~ msgstr "Nie udało się odczytać stanu softdevice"
@@ -3610,7 +3610,7 @@ msgstr ""
 #~ msgstr "Nie udało się zapisać gatts, błąd 0x%04x"
 
 #~ msgid "Flash erase failed"
-#~ msgstr "Nie udało się skasować flash"
+#~ msgstr "Nie udało się skasować flash"
 
 #~ msgid "Flash erase failed to start, err 0x%04x"
 #~ msgstr "Nie udało się rozpocząć kasowania flash, błąd 0x%04x"

From c34cb82ecb26ac2d5d85394ec535928194cd9e5e Mon Sep 17 00:00:00 2001
From: Jeff Epler
Date: Wed, 2 Sep 2020 15:52:02 -0500
Subject: [PATCH 3/6] makeqstrdata: correct range of low code points to
 0x80..0x9f inclusive

The previous range was unintentionally big and overlaps some characters
we'd like to use (and also 0xa0, which we don't intentionally use).
---
 py/makeqstrdata.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
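To illustrate why 0xbf was wrong (a hypothetical snippet, not part of
the patch; the table contents are made up): with 32 bigrams the
substituted code points run from 0x80 to 0x80 + 31 = 0x9f, so accepting
anything up to 0xbf would misread genuine characters such as the NBSP
(0xa0) just removed from the locales:

    ngrams = ['t ', 'e '] + ['..'] * 30        # a stand-in 32-entry table
    assert 0x80 + len(ngrams) - 1 == 0x9f      # last valid bigram code point

    def decode(compressed, ngrams, start=0x80, end=0x9f):
        return "".join(ngrams[ord(c) - start] if start <= ord(c) <= end else c
                       for c in compressed)

    print(decode("\x80text", ngrams))   # 't text': bigram expanded
    print(decode("\xa0text", ngrams))   # NBSP passes through unchanged
    # With the old end of 0xbf, the second call would instead evaluate
    # ngrams[0x20] -- out of range for a 32-entry table (IndexError).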
diff --git a/py/makeqstrdata.py b/py/makeqstrdata.py
index 28aed3df97..350f11c4cb 100644
--- a/py/makeqstrdata.py
+++ b/py/makeqstrdata.py
@@ -116,7 +116,7 @@ def pua_to_ngrams(compressed, ngrams):
     if len(ngrams) > 32:
         start, end = 0xe000, 0xf8ff
     else:
-        start, end = 0x80, 0xbf
+        start, end = 0x80, 0x9f
     return "".join(ngrams[ord(c) - start] if (start <= ord(c) <= end) else c for c in compressed)
 
 def compute_huffman_coding(translations, qstrs, compression_filename):
@@ -146,6 +146,7 @@ def compute_huffman_coding(translations, qstrs, compression_filename):
         last_l = l
     lengths = bytearray()
     print("// length count", length_count)
+    print("// bigrams", ngrams)
     for i in range(1, max(length_count) + 2):
         lengths.append(length_count.get(i, 0))
     print("// values", values, "lengths", len(lengths), lengths)

From cbfd38d1ce8839e11e828b0e8742d5d983446313 Mon Sep 17 00:00:00 2001
From: Jeff Epler
Date: Wed, 2 Sep 2020 19:09:23 -0500
Subject: [PATCH 4/6] Rename functions to encode_ngrams / decode_ngrams

---
 py/makeqstrdata.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/py/makeqstrdata.py b/py/makeqstrdata.py
index 350f11c4cb..40c50b7b43 100644
--- a/py/makeqstrdata.py
+++ b/py/makeqstrdata.py
@@ -103,7 +103,7 @@ def translate(translation_file, i18ns):
 def frequent_ngrams(corpus, sz, n):
     return collections.Counter(corpus[i:i+sz] for i in range(len(corpus)-sz)).most_common(n)
 
-def ngrams_to_pua(translation, ngrams):
+def encode_ngrams(translation, ngrams):
     if len(ngrams) > 32:
         start = 0xe000
     else:
@@ -112,7 +112,7 @@ def encode_ngrams(translation, ngrams):
         translation = translation.replace(g, chr(start + i))
     return translation
 
-def pua_to_ngrams(compressed, ngrams):
+def decode_ngrams(compressed, ngrams):
     if len(ngrams) > 32:
         start, end = 0xe000, 0xf8ff
     else:
@@ -123,7 +123,7 @@ def compute_huffman_coding(translations, qstrs, compression_filename):
     all_strings = [x[1] for x in translations]
     all_strings_concat = "".join(all_strings)
     ngrams = [i[0] for i in frequent_ngrams(all_strings_concat, 2, 32)]
-    all_strings_concat = ngrams_to_pua(all_strings_concat, ngrams)
+    all_strings_concat = encode_ngrams(all_strings_concat, ngrams)
     counts = collections.Counter(all_strings_concat)
     cb = huffman.codebook(counts.items())
     values = []
@@ -211,7 +211,7 @@ def decompress(encoding_table, encoded, encoded_length_bits):
             searched_length += lengths[bit_length]
 
         v = values[searched_length + bits - max_code]
-        v = pua_to_ngrams(v, ngrams)
+        v = decode_ngrams(v, ngrams)
         i += len(v.encode('utf-8'))
         dec.append(v)
     return ''.join(dec)
@@ -220,7 +220,7 @@ def compress(encoding_table, decompressed, encoded_length_bits, len_translation_encoded):
     if not isinstance(decompressed, str):
         raise TypeError()
     values, lengths, ngrams = encoding_table
-    decompressed = ngrams_to_pua(decompressed, ngrams)
+    decompressed = encode_ngrams(decompressed, ngrams)
     enc = bytearray(len(decompressed) * 3)
     #print(decompressed)
     #print(lengths)

From bdb07adfccaf25576a8f1e074db7f8b7e48890b2 Mon Sep 17 00:00:00 2001
From: Jeff Epler
Date: Tue, 8 Sep 2020 19:07:53 -0500
Subject: [PATCH 5/6] translations: Make decompression clearer

Now the bigram_start and bigram_end defines get filled in with concrete
values, e.g., 128 (0x80) and 159 (0x9f).
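For reference, a rough Python model of the decompression side after
this change; the two-entry bigrams table and the asserts are made up
for illustration, and the real branch lives in put_utf8() in
supervisor/shared/translate.c:

    # Flattened pairs of constituent code points, as emitted into
    # compression.generated.h: here 't ' and 'e '.
    bigrams = [0x74, 0x20, 0x65, 0x20]
    bigram_start = 0x80
    bigram_end = bigram_start + len(bigrams) // 2 - 1   # end is inclusive

    def put_utf8(u):
        if bigram_start <= u <= bigram_end:
            n = (u - bigram_start) * 2
            # Table entries never encode bigrams themselves, so this
            # recursion is at most one level deep.
            return put_utf8(bigrams[n]) + put_utf8(bigrams[n + 1])
        return chr(u).encode("utf-8")

    assert put_utf8(0x80) == b"t "
    assert put_utf8(0x81) == b"e "
    assert put_utf8(0x41) == b"A"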
---
 py/makeqstrdata.py            |  9 ++++++++-
 supervisor/shared/translate.c | 15 +++++++--------
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/py/makeqstrdata.py b/py/makeqstrdata.py
index 40c50b7b43..721fa83206 100644
--- a/py/makeqstrdata.py
+++ b/py/makeqstrdata.py
@@ -159,7 +159,14 @@ def compute_huffman_coding(translations, qstrs, compression_filename):
         f.write("const uint8_t lengths[] = {{ {} }};\n".format(", ".join(map(str, lengths))))
         f.write("const {} values[] = {{ {} }};\n".format(values_type, ", ".join(str(ord(u)) for u in values)))
         f.write("#define compress_max_length_bits ({})\n".format(max_translation_encoded_length.bit_length()))
-        f.write("const {} ngrams[] = {{ {} }};\n".format(values_type, ", ".join(str(u) for u in ngramdata)))
+        f.write("const {} bigrams[] = {{ {} }};\n".format(values_type, ", ".join(str(u) for u in ngramdata)))
+        if len(ngrams) > 32:
+            bigram_start = 0xe000
+        else:
+            bigram_start = 0x80
+        bigram_end = bigram_start + len(ngrams) - 1 # End is inclusive
+        f.write("#define bigram_start {}\n".format(bigram_start))
+        f.write("#define bigram_end {}\n".format(bigram_end))
     return values, lengths, ngrams
 
 def decompress(encoding_table, encoded, encoded_length_bits):
diff --git a/supervisor/shared/translate.c b/supervisor/shared/translate.c
index 49ee8f143f..0235293bee 100644
--- a/supervisor/shared/translate.c
+++ b/supervisor/shared/translate.c
@@ -47,19 +47,18 @@ STATIC int put_utf8(char *buf, int u) {
     if(u <= 0x7f) {
         *buf = u;
         return 1;
-    } else if(MP_ARRAY_SIZE(ngrams) <= 64 && u <= 0xbf) {
+    } else if(bigram_start <= u && u <= bigram_end) {
         int n = (u - 0x80) * 2;
-        int ret = put_utf8(buf, ngrams[n]);
-        return ret + put_utf8(buf + ret, ngrams[n+1]);
+        // (note that at present, entries in the bigrams table are
+        // guaranteed not to represent bigrams themselves, so this adds
+        // at most 1 level of recursive call)
+        int ret = put_utf8(buf, bigrams[n]);
+        return ret + put_utf8(buf + ret, bigrams[n+1]);
     } else if(u <= 0x07ff) {
         *buf++ = 0b11000000 | (u >> 6);
         *buf = 0b10000000 | (u & 0b00111111);
         return 2;
-    } else if(MP_ARRAY_SIZE(ngrams) > 64 && u >= 0xe000 && u <= 0xf8ff) {
-        int n = (u - 0xe000) * 2;
-        int ret = put_utf8(buf, ngrams[n]);
-        return ret + put_utf8(buf + ret, ngrams[n+1]);
-    } else { // u <= 0xffff)
+    } else { // u <= 0xffff
         *buf++ = 0b11000000 | (u >> 12);
         *buf = 0b10000000 | ((u >> 6) & 0b00111111);
         *buf = 0b10000000 | (u & 0b00111111);

From 0eee93729a3074461c4907f03deb803c2f35a6fe Mon Sep 17 00:00:00 2001
From: Jeff Epler
Date: Tue, 8 Sep 2020 20:54:47 -0500
Subject: [PATCH 6/6] Fix decompression of unicode values above 2047

Two problems: The lead byte for 3-byte sequences was wrong, and one
mid-byte was not even filled in due to a missing "++"!  Apparently this
was broken ever since the first "Compress as unicode, not bytes"
commit, but I believed I'd "tested" it by running on the Pinyin
translation.

This rendered at least the Korean and Japanese translations completely
illegible, affecting 5.0 and all later releases.
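To see the breakage concretely, here is an illustrative Python
rendering (not part of the patch) of the broken and fixed 3-byte paths,
checked against Python's own UTF-8 encoder:

    def put_utf8_broken(u):
        # Old code: wrong lead byte (0b110xxxxx instead of 0b1110xxxx),
        # and the middle byte was overwritten because of the missing
        # "++"; the third byte handed back was uninitialized.
        return bytes([0b11000000 | (u >> 12),
                      0b10000000 | (u & 0b00111111)])

    def put_utf8_fixed(u):
        return bytes([0b11100000 | (u >> 12),
                      0b10000000 | ((u >> 6) & 0b00111111),
                      0b10000000 | (u & 0b00111111)])

    u = 0xd55c                          # HANGUL SYLLABLE HAN
    print(put_utf8_broken(u).hex())     # cd9c (plus one garbage byte in C)
    print(put_utf8_fixed(u).hex())      # ed959c
    assert put_utf8_fixed(u) == chr(u).encode("utf-8")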
---
 supervisor/shared/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/supervisor/shared/translate.c b/supervisor/shared/translate.c
index 0235293bee..5cd7b8dd88 100644
--- a/supervisor/shared/translate.c
+++ b/supervisor/shared/translate.c
@@ -59,8 +59,8 @@ STATIC int put_utf8(char *buf, int u) {
         *buf = 0b10000000 | (u & 0b00111111);
         return 2;
     } else { // u <= 0xffff
-        *buf++ = 0b11000000 | (u >> 12);
-        *buf = 0b10000000 | ((u >> 6) & 0b00111111);
+        *buf++ = 0b11100000 | (u >> 12);
+        *buf++ = 0b10000000 | ((u >> 6) & 0b00111111);
         *buf = 0b10000000 | (u & 0b00111111);
         return 3;
     }
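With the series applied, a quick host-side property check is possible;
this harness is assumed (not part of the patches), with the helpers
condensed from their counterparts in py/makeqstrdata.py:

    import collections

    def frequent_ngrams(corpus, sz, n):
        return collections.Counter(
            corpus[i:i+sz] for i in range(len(corpus)-sz)).most_common(n)

    def encode_ngrams(translation, ngrams):
        start = 0xe000 if len(ngrams) > 32 else 0x80
        for i, g in enumerate(ngrams):
            translation = translation.replace(g, chr(start + i))
        return translation

    def decode_ngrams(compressed, ngrams):
        start, end = (0xe000, 0xf8ff) if len(ngrams) > 32 else (0x80, 0x9f)
        return "".join(ngrams[ord(c) - start] if start <= ord(c) <= end else c
                       for c in compressed)

    corpus = "can't add a thing to the tin"
    ngrams = [g for g, _ in frequent_ngrams(corpus, 2, 8)]
    # Substitution must be losslessly reversible for any text that does
    # not already use the claimed code point range.
    assert decode_ngrams(encode_ngrams(corpus, ngrams), ngrams) == corpus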