circuitpython/py/mkrules.mk

ifneq ($(MKENV_INCLUDED),1)
# We assume that mkenv is in the same directory as this file.
THIS_MAKEFILE = $(lastword $(MAKEFILE_LIST))
include $(dir $(THIS_MAKEFILE))mkenv.mk
endif
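# Illustrative only (not part of the build): a port Makefile typically pulls
# these fragments in roughly like this, with the exact paths depending on the
# port:
#
#   include ../../py/mkenv.mk
#   include $(TOP)/py/py.mk
#   ...port-specific variables and rules...
#   include $(TOP)/py/mkrules.mk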
# Extra deps that need to happen before object compilation.
OBJ_EXTRA_ORDER_DEPS =
ifeq ($(MICROPY_ROM_TEXT_COMPRESSION),1)
# If compression is enabled, trigger the build of compressed.data.h...
OBJ_EXTRA_ORDER_DEPS += $(HEADER_BUILD)/compressed.data.h
# ...and enable the MP_COMPRESSED_ROM_TEXT macro (used by MP_ERROR_TEXT).
CFLAGS += -DMICROPY_ROM_TEXT_COMPRESSION=1
endif
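# Illustrative only: a port opts in by setting the flag before including this
# file, e.g. in its Makefile or a board .mk fragment:
#
#   MICROPY_ROM_TEXT_COMPRESSION ?= 1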
# QSTR generation uses the same CFLAGS, with these modifications.
QSTR_GEN_FLAGS = -DNO_QSTR
# Note: := to force evaluation immediately.
QSTR_GEN_CFLAGS := $(CFLAGS)
QSTR_GEN_CFLAGS += $(QSTR_GEN_FLAGS)
QSTR_GEN_CXXFLAGS := $(CXXFLAGS)
QSTR_GEN_CXXFLAGS += $(QSTR_GEN_FLAGS)
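# Sketch of why := matters here (illustrative variable and values):
#
#   CFLAGS = -Os
#   SNAPSHOT := $(CFLAGS)   # expands immediately, SNAPSHOT is "-Os"
#   CFLAGS += -DLATER       # later additions don't leak into SNAPSHOT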
# This file expects that OBJ contains a list of all of the object files.
# The directory portion of each object file is used to locate the source
# and should not contain any ..'s but rather be relative to the top of the
# tree.
#
# So for example, py/map.c would have an object file named py/map.o.
# The object files will go into the build directory and maintain the same
# directory structure as the source tree. So the final dependency will look
# like this:
#
# build/py/map.o: py/map.c
#
# We set vpath to point to the top of the tree so that the source files
# can be located. Following this scheme allows a single build rule
# to be used to compile all .c files.
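# Illustrative only: a port typically builds OBJ along these lines (the
# variable names here are conventional, not required by this file):
#
#   SRC_C = main.c modmachine.c
#   OBJ = $(PY_O) $(addprefix $(BUILD)/, $(SRC_C:.c=.o))
#
# so modmachine.c is compiled to $(BUILD)/modmachine.o by the pattern rules below.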
vpath %.S . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.S
$(ECHO) "CC $<"
$(Q)$(CC) $(CFLAGS) -c -o $@ $<
vpath %.s . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.s
$(ECHO) "AS $<"
$(Q)$(AS) -o $@ $<
define compile_c
$(ECHO) "CC $<"
$(Q)$(CC) $(CFLAGS) -c -MD -o $@ $<
@# The following fixes the dependency file.
@# See http://make.paulandlesley.org/autodep.html for details.
@# Regex adjusted from the above to play better with Windows paths, etc.
@$(CP) $(@:.o=.d) $(@:.o=.P); \
$(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \
$(RM) -f $(@:.o=.d)
endef
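# Sketch of the .d -> .P rewrite above, for a hypothetical dependency file:
#
#   build/py/map.d holds:  build/py/map.o: py/map.c py/map.h
#   build/py/map.P holds:  build/py/map.o: py/map.c py/map.h
#                          py/map.c py/map.h :
#
# The appended prerequisite-less rule stops make from erroring out when a
# header listed in a stale .P file is later deleted or renamed.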
define compile_cxx
$(ECHO) "CXX $<"
$(Q)$(CXX) $(CXXFLAGS) -c -MD -o $@ $<
@# The following fixes the dependency file.
@# See http://make.paulandlesley.org/autodep.html for details.
@# Regex adjusted from the above to play better with Windows paths, etc.
@$(CP) $(@:.o=.d) $(@:.o=.P); \
$(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \
$(RM) -f $(@:.o=.d)
endef
vpath %.c . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.c
$(call compile_c)
vpath %.cpp . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.cpp
$(call compile_cxx)
$(BUILD)/%.pp: %.c
$(ECHO) "PreProcess $<"
$(Q)$(CPP) $(CFLAGS) -Wp,-C,-dD,-dI -o $@ $<
# The following rule uses | to create an order-only prerequisite. Order-only
# prerequisites get built when needed, but they don't cause timestamp
# checking to be performed on the targets that list them.
#
# We don't know which source files actually need the generated.h (since
# it is #included from qstr.h). The compiler-generated dependencies will cause
# the right .o's to get recompiled if the generated.h file changes. Adding
# an order-only dependency to all of the .o's will cause the generated .h
# to get built before we try to compile any of them.
$(OBJ): | $(HEADER_BUILD)/qstrdefs.generated.h $(HEADER_BUILD)/mpversion.h $(OBJ_EXTRA_ORDER_DEPS)
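# Order-only behaviour in miniature (hypothetical targets):
#
#   out.o: src.c | gen.h
#
# gen.h is generated before out.o is compiled, but a regenerated gen.h by
# itself does not mark out.o stale; the compiler-emitted .P dependencies
# cover the objects that really include it.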
# The logic for qstr regeneration (applied by makeqstrdefs.py) is:
# - if anything in QSTR_GLOBAL_DEPENDENCIES is newer, then process all source files ($^)
# - else, if list of newer prerequisites ($?) is not empty, then process just these ($?)
# - else, process all source files ($^) [this covers "make -B" which can set $? to empty]
# See more information about this process in docs/develop/qstr.rst.
$(HEADER_BUILD)/qstr.i.last: $(SRC_QSTR) $(QSTR_GLOBAL_DEPENDENCIES) $(HEADER_BUILD)/moduledefs.h | $(QSTR_GLOBAL_REQUIREMENTS)
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py pp $(CPP) output $(HEADER_BUILD)/qstr.i.last cflags $(QSTR_GEN_CFLAGS) cxxflags $(QSTR_GEN_CXXFLAGS) sources $^ dependencies $(QSTR_GLOBAL_DEPENDENCIES) changed_sources $?
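# Worked example of the logic above (hypothetical paths): touching only
# ports/foo/machine_pin.c puts just that file in $?, so only it is
# re-preprocessed, while touching something in QSTR_GLOBAL_DEPENDENCIES
# (e.g. an mpconfig header) re-processes all of $(SRC_QSTR) via $^.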
$(HEADER_BUILD)/qstr.split: $(HEADER_BUILD)/qstr.i.last
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py split qstr $< $(HEADER_BUILD)/qstr _
$(Q)$(TOUCH) $@
$(QSTR_DEFS_COLLECTED): $(HEADER_BUILD)/qstr.split
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py cat qstr _ $(HEADER_BUILD)/qstr $@
# Compressed error strings.
$(HEADER_BUILD)/compressed.split: $(HEADER_BUILD)/qstr.i.last
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py split compress $< $(HEADER_BUILD)/compress _
$(Q)$(TOUCH) $@
$(HEADER_BUILD)/compressed.collected: $(HEADER_BUILD)/compressed.split
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py cat compress _ $(HEADER_BUILD)/compress $@
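# Illustrative flow when MICROPY_ROM_TEXT_COMPRESSION=1 (the final step is
# assumed to be defined outside this file, alongside the other QSTR rules):
#
#   qstr.i.last -> compressed.split -> compressed.collected -> compressed.data.h
#
# compressed.data.h supplies the data behind the MP_COMPRESSED_ROM_TEXT macro.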
# $(sort $(var)) removes duplicates
#
# The net effect of this is that the objects depend on the object
# directories (but only for their existence), and the object directories
# will be created if they don't exist.
OBJ_DIRS = $(sort $(dir $(OBJ)))
$(OBJ): | $(OBJ_DIRS)
$(OBJ_DIRS):
$(MKDIR) -p $@
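# Illustrative expansion: with OBJ = build/py/map.o build/py/obj.o build/main.o,
# $(sort $(dir $(OBJ))) yields "build/ build/py/", and each directory is
# created once by the $(MKDIR) rule above.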
$(HEADER_BUILD):
$(MKDIR) -p $@
ifneq ($(MICROPY_MPYCROSS_DEPENDENCY),)
# to automatically build mpy-cross, if needed
$(MICROPY_MPYCROSS_DEPENDENCY):
$(MAKE) -C $(dir $@)
endif
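# Illustrative only: this is typically pointed at the in-tree cross-compiler,
# e.g.
#
#   MICROPY_MPYCROSS_DEPENDENCY ?= $(TOP)/mpy-cross/mpy-cross
#
# so the rule above runs make in $(TOP)/mpy-cross/ when the binary is needed.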
ifneq ($(FROZEN_MANIFEST),)
# to build frozen_content.c from a manifest
$(BUILD)/frozen_content.c: FORCE $(BUILD)/genhdr/qstrdefs.generated.h | $(MICROPY_MPYCROSS_DEPENDENCY)
$(Q)$(MAKE_MANIFEST) -o $@ -v "MPY_DIR=$(TOP)" -v "MPY_LIB_DIR=$(MPY_LIB_DIR)" -v "PORT_DIR=$(shell pwd)" -v "BOARD_DIR=$(BOARD_DIR)" -b "$(BUILD)" $(if $(MPY_CROSS_FLAGS),-f"$(MPY_CROSS_FLAGS)",) --mpy-tool-flags="$(MPY_TOOL_FLAGS)" $(FROZEN_MANIFEST)
ifneq ($(FROZEN_DIR),)
$(error FROZEN_DIR cannot be used in conjunction with FROZEN_MANIFEST)
endif
ifneq ($(FROZEN_MPY_DIR),)
$(error FROZEN_MPY_DIR cannot be used in conjunction with FROZEN_MANIFEST)
endif
endif
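# Illustrative only: a board enables freezing by pointing FROZEN_MANIFEST at a
# manifest file; the path below is an example, not a requirement:
#
#   FROZEN_MANIFEST ?= $(BOARD_DIR)/manifest.py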
ifneq ($(FROZEN_DIR),)
$(info Warning: FROZEN_DIR is deprecated in favour of FROZEN_MANIFEST)
$(BUILD)/frozen.c: $(wildcard $(FROZEN_DIR)/*) $(HEADER_BUILD) $(FROZEN_EXTRA_DEPS)
$(ECHO) "GEN $@"
$(Q)$(MAKE_FROZEN) $(FROZEN_DIR) > $@
endif
ifneq ($(FROZEN_MPY_DIR),)
$(info Warning: FROZEN_MPY_DIR is deprecated in favour of FROZEN_MANIFEST)
# make a list of all the .py files that need compiling and freezing
FROZEN_MPY_PY_FILES := $(shell find -L $(FROZEN_MPY_DIR) -type f -name '*.py' | $(SED) -e 's=^$(FROZEN_MPY_DIR)/==')
FROZEN_MPY_MPY_FILES := $(addprefix $(BUILD)/frozen_mpy/,$(FROZEN_MPY_PY_FILES:.py=.mpy))
# to build .mpy files from .py files
$(BUILD)/frozen_mpy/%.mpy: $(FROZEN_MPY_DIR)/%.py | $(MICROPY_MPYCROSS_DEPENDENCY)
@$(ECHO) "MPY $<"
$(Q)$(MKDIR) -p $(dir $@)
$(Q)$(MICROPY_MPYCROSS) -o $@ -s $(<:$(FROZEN_MPY_DIR)/%=%) $(MPY_CROSS_FLAGS) $<
# to build frozen_mpy.c from all .mpy files
$(BUILD)/frozen_mpy.c: $(FROZEN_MPY_MPY_FILES) $(BUILD)/genhdr/qstrdefs.generated.h
@$(ECHO) "GEN $@"
$(Q)$(MPY_TOOL) -f -q $(BUILD)/genhdr/qstrdefs.preprocessed.h $(FROZEN_MPY_MPY_FILES) > $@
endif
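# Illustrative mapping for this deprecated path (hypothetical file names):
#
#   $(FROZEN_MPY_DIR)/foo/bar.py -> $(BUILD)/frozen_mpy/foo/bar.mpy -> frozen_mpy.c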
ifneq ($(PROG),)
# Build a standalone executable (the unix port does this).
# The executable should have an .exe extension for builds targeting 'pure'
# Windows, i.e. msvc or mingw builds, but not when using msys or cygwin's gcc.
COMPILER_TARGET := $(shell $(CC) -dumpmachine)
ifneq (,$(findstring mingw,$(COMPILER_TARGET)))
PROG := $(PROG).exe
endif
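# Illustrative: a mingw toolchain reports a triple such as "x86_64-w64-mingw32"
# from $(CC) -dumpmachine, so PROG becomes e.g. micropython.exe there.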
all: $(PROG)
$(PROG): $(OBJ)
$(ECHO) "LINK $@"
# Do not pass COPT here - those are *C* compiler optimizations. For example,
# we may want to compile using Thumb, but link with a non-Thumb libc.
$(Q)$(CC) -o $@ $^ $(LIB) $(LDFLAGS)
ifndef DEBUG
$(Q)$(STRIP) $(STRIPFLAGS_EXTRA) $@
endif
$(Q)$(SIZE) $$(find $(BUILD) -path "$(BUILD)/build/frozen*.o") $@
clean: clean-prog
clean-prog:
$(RM) -f $(PROG)
$(RM) -f $(PROG).map
.PHONY: clean-prog
endif
submodules:
$(ECHO) "Updating submodules: $(GIT_SUBMODULES)"
ifneq ($(GIT_SUBMODULES),)
$(Q)git submodule update --init $(addprefix $(TOP)/,$(GIT_SUBMODULES))
endif
.PHONY: submodules
LIBMICROPYTHON = libmicropython.a
# We can execute extra commands after library creation using
# LIBMICROPYTHON_EXTRA_CMD. This may be needed e.g. to integrate
# with 3rd-party projects which don't have proper dependency
# tracking. Then LIBMICROPYTHON_EXTRA_CMD can e.g. touch some
# other file to cause the needed effect, e.g. relinking against the new library.
lib $(LIBMICROPYTHON): $(OBJ)
$(Q)$(AR) rcs $(LIBMICROPYTHON) $^
$(LIBMICROPYTHON_EXTRA_CMD)
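# Illustrative only (hypothetical path): an embedding project could hook the
# archive step like this to force its own relink:
#
#   LIBMICROPYTHON_EXTRA_CMD = $(TOUCH) ../app/main.c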
clean:
$(RM) -rf $(BUILD) $(CLEAN_EXTRA)
.PHONY: clean
# Clean every non-git file from FROZEN_DIR/FROZEN_MPY_DIR, making a backup first.
# We run rmdir below to avoid leaving an empty backup dir (it will silently fail
# if the backup is non-empty).
clean-frozen:
if [ -n "$(FROZEN_MPY_DIR)" ]; then \
backup_dir=$(FROZEN_MPY_DIR).$$(date +%Y%m%dT%H%M%S); mkdir $$backup_dir; \
cd $(FROZEN_MPY_DIR); git status --ignored -u all -s . | awk ' {print $$2}' \
| xargs --no-run-if-empty cp --parents -t ../$$backup_dir; \
rmdir ../$$backup_dir 2>/dev/null || true; \
git clean -d -f .; \
fi
if [ -n "$(FROZEN_DIR)" ]; then \
backup_dir=$(FROZEN_DIR).$$(date +%Y%m%dT%H%M%S); mkdir $$backup_dir; \
cd $(FROZEN_DIR); git status --ignored -u all -s . | awk ' {print $$2}' \
| xargs --no-run-if-empty cp --parents -t ../$$backup_dir; \
rmdir ../$$backup_dir 2>/dev/null || true; \
git clean -d -f .; \
fi
.PHONY: clean-frozen
print-cfg:
$(ECHO) "PY_SRC = $(PY_SRC)"
$(ECHO) "BUILD = $(BUILD)"
$(ECHO) "OBJ = $(OBJ)"
.PHONY: print-cfg
print-def:
@$(ECHO) "The following defines are built into the $(CC) compiler"
$(TOUCH) __empty__.c
@$(CC) -E -Wp,-dM __empty__.c
@$(RM) -f __empty__.c
-include $(OBJ:.o=.P)