# circuitpython/py/py.mk
# where py object files go (they have a name prefix to prevent filename clashes)
PY_BUILD = $(BUILD)/py

# where autogenerated header files go
HEADER_BUILD = $(BUILD)/genhdr

# file containing qstr defs for the core Python bit
PY_QSTR_DEFS = $(PY_SRC)/qstrdefs.h

# CIRCUITPY: locale used to build the compressed translation strings
TRANSLATION ?= en_US

# If qstr autogeneration is not disabled we specify the output header
# for all collected qstrings.
ifneq ($(QSTR_AUTOGEN_DISABLE),1)
QSTR_DEFS_COLLECTED = $(HEADER_BUILD)/qstrdefs.collected.h
endif

# Any files listed by these variables will cause a full regeneration of qstrs
# DEPENDENCIES: included in qstr processing; REQUIREMENTS: not included
QSTR_GLOBAL_DEPENDENCIES += $(PY_SRC)/mpconfig.h mpconfigport.h
QSTR_GLOBAL_REQUIREMENTS += $(HEADER_BUILD)/mpversion.h
# some code is performance bottleneck and compiled with other optimization options
CSUPEROPT = -O3

# Enable building 32-bit code on 64-bit host.
ifeq ($(MICROPY_FORCE_32BIT),1)
CC += -m32
CXX += -m32
LD += -m32
endif
# External modules written in C.
ifneq ($(USER_C_MODULES),)
# pre-define USERMOD variables as simply-expanded so that values are
# immediately expanded as each module's makefile appends to them
# C/C++ files that are included in the QSTR/module build
SRC_USERMOD_C :=
SRC_USERMOD_CXX :=
# Other C/C++ files (e.g. libraries or helpers)
SRC_USERMOD_LIB_C :=
SRC_USERMOD_LIB_CXX :=
# Optionally set flags
CFLAGS_USERMOD :=
CXXFLAGS_USERMOD :=
LDFLAGS_USERMOD :=
# Backwards compatibility with older user c modules that set SRC_USERMOD
# (folded into SRC_USERMOD_C below)
SRC_USERMOD :=

# Include every <module>/micropython.mk under USER_C_MODULES, with
# USERMOD_DIR set to the module's directory (trailing slash stripped).
$(foreach module, $(wildcard $(USER_C_MODULES)/*/micropython.mk), \
    $(eval USERMOD_DIR = $(patsubst %/,%,$(dir $(module))))\
    $(info Including User C Module from $(USERMOD_DIR))\
    $(eval include $(module))\
)

SRC_USERMOD_C += $(SRC_USERMOD)

# Strip the USER_C_MODULES prefix so object files land under $(BUILD).
SRC_USERMOD_PATHFIX_C += $(patsubst $(USER_C_MODULES)/%.c,%.c,$(SRC_USERMOD_C))
SRC_USERMOD_PATHFIX_CXX += $(patsubst $(USER_C_MODULES)/%.cpp,%.cpp,$(SRC_USERMOD_CXX))
SRC_USERMOD_PATHFIX_LIB_C += $(patsubst $(USER_C_MODULES)/%.c,%.c,$(SRC_USERMOD_LIB_C))
SRC_USERMOD_PATHFIX_LIB_CXX += $(patsubst $(USER_C_MODULES)/%.cpp,%.cpp,$(SRC_USERMOD_LIB_CXX))

CFLAGS += $(CFLAGS_USERMOD)
CXXFLAGS += $(CXXFLAGS_USERMOD)
LDFLAGS += $(LDFLAGS_USERMOD)

# Only the non-LIB sources participate in qstr extraction.
SRC_QSTR += $(SRC_USERMOD_PATHFIX_C) $(SRC_USERMOD_PATHFIX_CXX)

PY_O += $(addprefix $(BUILD)/, $(SRC_USERMOD_PATHFIX_C:.c=.o))
PY_O += $(addprefix $(BUILD)/, $(SRC_USERMOD_PATHFIX_CXX:.cpp=.o))
PY_O += $(addprefix $(BUILD)/, $(SRC_USERMOD_PATHFIX_LIB_C:.c=.o))
PY_O += $(addprefix $(BUILD)/, $(SRC_USERMOD_PATHFIX_LIB_CXX:.cpp=.o))

# CIRCUITPY: optionally build the ulab module from extmod/ulab.
ifeq ($(CIRCUITPY_ULAB),1)
ULAB_SRCS := $(shell find $(TOP)/extmod/ulab/code -type f -name "*.c")
SRC_MOD += $(patsubst $(TOP)/%,%,$(ULAB_SRCS))
CFLAGS_MOD += -DCIRCUITPY_ULAB=1 -DMODULE_ULAB_ENABLED=1 -DULAB_HAS_USER_MODULE=0 -iquote $(TOP)/extmod/ulab/code
# ulab is third-party code; relax warnings its sources do not satisfy.
$(BUILD)/extmod/ulab/code/%.o: CFLAGS += -Wno-missing-declarations -Wno-missing-prototypes -Wno-unused-parameter -Wno-float-equal -Wno-sign-compare -Wno-cast-align -Wno-shadow -DCIRCUITPY
ifeq ($(CIRCUITPY_ULAB_OPTIMIZE_SIZE),1)
$(BUILD)/extmod/ulab/code/%.o: CFLAGS += -Os
endif # CIRCUITPY_ULAB_OPTIMIZE_SIZE
endif # CIRCUITPY_ULAB
endif # USER_C_MODULES
# py object files
PY_CORE_O_BASENAME = $(addprefix py/,\
	mpstate.o \
	nlr.o \
	nlrx86.o \
	nlrx64.o \
	nlrthumb.o \
	nlraarch64.o \
	nlrmips.o \
	nlrpowerpc.o \
	nlrxtensa.o \
	nlrsetjmp.o \
	malloc.o \
	gc.o \
	pystack.o \
	qstr.o \
	vstr.o \
	mpprint.o \
	unicode.o \
	mpz.o \
	reader.o \
	lexer.o \
	parse.o \
	scope.o \
	compile.o \
	emitcommon.o \
	emitbc.o \
	enum.o \
	asmbase.o \
	asmx64.o \
	emitnx64.o \
	asmx86.o \
	emitnx86.o \
	asmthumb.o \
	emitnthumb.o \
	emitinlinethumb.o \
	asmarm.o \
	emitnarm.o \
	asmxtensa.o \
	emitnxtensa.o \
	emitinlinextensa.o \
	emitnxtensawin.o \
	formatfloat.o \
	parsenumbase.o \
	parsenum.o \
	proto.o \
	emitglue.o \
	persistentcode.o \
	runtime.o \
	runtime_utils.o \
	scheduler.o \
	nativeglue.o \
	pairheap.o \
	ringbuf.o \
	stackctrl.o \
	argcheck.o \
	warning.o \
	profile.o \
	map.o \
	obj.o \
	objarray.o \
	objattrtuple.o \
	objbool.o \
	objboundmeth.o \
	objcell.o \
	objclosure.o \
	objcomplex.o \
	objdeque.o \
	objdict.o \
	objenumerate.o \
	objexcept.o \
	objfilter.o \
	objfloat.o \
	objfun.o \
	objgenerator.o \
	objgetitemiter.o \
	objint.o \
	objint_longlong.o \
	objint_mpz.o \
	objlist.o \
	objmap.o \
	objmodule.o \
	objobject.o \
	objpolyiter.o \
	objproperty.o \
	objnone.o \
	objnamedtuple.o \
	objrange.o \
	objreversed.o \
	objset.o \
	objsingleton.o \
	objslice.o \
	objstr.o \
	objstrunicode.o \
	objstringio.o \
	objtraceback.o \
	objtuple.o \
	objtype.o \
	objzip.o \
	opmethods.o \
	sequence.o \
	stream.o \
	binary.o \
	builtinimport.o \
	builtinevex.o \
	builtinhelp.o \
	modarray.o \
	modbuiltins.o \
	modcollections.o \
	modgc.o \
	modio.o \
	modmath.o \
	modcmath.o \
	modmicropython.o \
	modstruct.o \
	modsys.o \
	moduerrno.o \
	modthread.o \
	vm.o \
	bc.o \
	showbc.o \
	repl.o \
	smallint.o \
	frozenmod.o \
	)

# prepend the build destination prefix to the py object files
PY_CORE_O = $(addprefix $(BUILD)/, $(PY_CORE_O_BASENAME))

# this is a convenience variable for ports that want core, extmod and frozen code
PY_O += $(PY_CORE_O)

# object file for frozen code specified via a manifest
# (frozen_content.c is generated into $(BUILD), and the generic compile rule
# prefixes $(BUILD) again — hence the doubled $(BUILD) in the object path)
ifneq ($(FROZEN_MANIFEST),)
PY_O += $(BUILD)/$(BUILD)/frozen_content.o
endif

# Sources that may contain qstrings (the nlr* assembly-heavy files never do)
SRC_QSTR_IGNORE = py/nlr%
SRC_QSTR += $(filter-out $(SRC_QSTR_IGNORE),$(PY_CORE_O_BASENAME:.o=.c))
# Anything that depends on FORCE will be considered out-of-date
FORCE:
.PHONY: FORCE

# Regenerate the version header on every build; order-only dep ensures the
# genhdr directory exists first without retriggering on its timestamp.
$(HEADER_BUILD)/mpversion.h: FORCE | $(HEADER_BUILD)
	$(Q)$(PYTHON) $(PY_SRC)/makeversionhdr.py $@
# mpconfigport.mk is optional, but changes to it may drastically change
# overall config, so they need to be caught
MPCONFIGPORT_MK = $(wildcard mpconfigport.mk)

# qstr data
# Adding an order only dependency on $(HEADER_BUILD) causes $(HEADER_BUILD) to get
# created before we run the script to generate the .h
# Note: we need to protect the qstr names from the preprocessor, so we wrap
# the lines in "" and then unwrap after the preprocessor is finished.
# See more information about this process in docs/develop/qstr.rst.
$(HEADER_BUILD)/qstrdefs.generated.h: $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) $(PY_SRC)/makeqstrdata.py mpconfigport.h $(MPCONFIGPORT_MK) $(PY_SRC)/mpconfig.h | $(HEADER_BUILD)
	$(ECHO) "GEN $@"
	$(Q)$(CAT) $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) | $(SED) 's/^Q(.*)/"&"/' | $(CPP) $(CFLAGS) - | $(SED) 's/^\"\(Q(.*)\)\"/\1/' > $(HEADER_BUILD)/qstrdefs.preprocessed.h
	$(Q)$(PYTHON) $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@

# Build the compressed message-string data from the collected strings.
$(HEADER_BUILD)/compressed.data.h: $(HEADER_BUILD)/compressed.collected
	$(ECHO) "GEN $@"
	$(Q)$(PYTHON) $(PY_SRC)/makecompresseddata.py $< > $@
# CIRCUITPY: for translations
$(HEADER_BUILD)/$(TRANSLATION).mo: $(TOP)/locale/$(TRANSLATION).po | $(HEADER_BUILD)
	$(Q)$(PYTHON) $(TOP)/tools/msgfmt.py -o $@ $^

# translations-*.c is generated as a side-effect of building compressed_translations.generated.h
# Specifying both in a single rule actually causes the rule to be run twice!
# This alternative makes it run just once.
# Another alternative is "grouped targets" (`a b &: c`), available in GNU make 4.3 and later.
# TODO: use grouped targets when we expect GNU make >= 4.3 is pervasive.
$(PY_BUILD)/translations-$(TRANSLATION).c: $(HEADER_BUILD)/compressed_translations.generated.h
	@true

$(HEADER_BUILD)/compressed_translations.generated.h: $(PY_SRC)/maketranslationdata.py $(HEADER_BUILD)/$(TRANSLATION).mo $(HEADER_BUILD)/qstrdefs.generated.h
	$(STEPECHO) "GEN $@"
	$(Q)mkdir -p $(PY_BUILD)
	$(Q)$(PYTHON) $(PY_SRC)/maketranslationdata.py --compression_filename $(HEADER_BUILD)/compressed_translations.generated.h --translation $(HEADER_BUILD)/$(TRANSLATION).mo --translation_filename $(PY_BUILD)/translations-$(TRANSLATION).c $(HEADER_BUILD)/qstrdefs.generated.h $(HEADER_BUILD)/qstrdefs.preprocessed.h

# Link the generated translation table into the core.
PY_CORE_O += $(PY_BUILD)/translations-$(TRANSLATION).o
# build a list of registered modules for py/objmodule.c.
$(HEADER_BUILD)/moduledefs.h: $(HEADER_BUILD)/moduledefs.collected
	@$(ECHO) "GEN $@"
	$(Q)$(PYTHON) $(PY_SRC)/makemoduledefs.py $< > $@

# build a list of registered root pointers for py/mpstate.h.
$(HEADER_BUILD)/root_pointers.h: $(HEADER_BUILD)/root_pointers.collected $(PY_SRC)/make_root_pointers.py
	@$(ECHO) "GEN $@"
	$(Q)$(PYTHON) $(PY_SRC)/make_root_pointers.py $< > $@
# Standard C functions like memset need to be compiled with special flags so
# the compiler does not optimise these functions in terms of themselves.
CFLAGS_BUILTIN ?= -ffreestanding -fno-builtin -fno-lto
$(BUILD)/shared/libc/string0.o: CFLAGS += $(CFLAGS_BUILTIN)

# Force nlr code to always be compiled with space-saving optimisation so
# that the function preludes are of a minimal and predictable form.
$(PY_BUILD)/nlr%.o: CFLAGS += -Os

# optimising gc for speed; 5ms down to 4ms on pybv2
$(PY_BUILD)/gc.o: CFLAGS += $(CSUPEROPT)

# optimising vm for speed, adds only a small amount to code size but makes a huge difference to speed (20% faster)
$(PY_BUILD)/vm.o: CFLAGS += $(CSUPEROPT)

# Optimizing vm.o for modern deeply pipelined CPUs with branch predictors
# may require disabling tail jump optimization. This will make sure that
# each opcode has its own dispatching jump which will improve branch
# predictor efficiency.
# https://marc.info/?l=lua-l&m=129778596120851
# http://hg.python.org/cpython/file/b127046831e2/Python/ceval.c#l828
# http://www.emulators.com/docs/nx25_nostradamus.htm
#-fno-crossjumping

# Include rules for extmod related code
include $(TOP)/extmod/extmod.mk