# where py object files go (they have a name prefix to prevent filename clashes)
PY_BUILD = $(BUILD)/py
# where autogenerated header files go
HEADER_BUILD = $(BUILD)/genhdr
# file containing qstr defs for the core Python bit
PY_QSTR_DEFS = $(PY_SRC)/qstrdefs.h
TRANSLATION := en_US
# If qstr autogeneration is not disabled, we specify the output header
# for all collected qstrings.
ifneq ($(QSTR_AUTOGEN_DISABLE),1)
QSTR_DEFS_COLLECTED = $(HEADER_BUILD)/qstrdefs.collected.h
endif
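# As an illustrative sketch (not used in this file): a port that supplies its
# own pre-collected qstr header could opt out of autogeneration by setting
#   QSTR_AUTOGEN_DISABLE = 1
# in its Makefile before including py.mk.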
# Any files listed by this variable will cause a full regeneration of qstrs
QSTR_GLOBAL_DEPENDENCIES += $(PY_SRC)/mpconfig.h mpconfigport.h
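# A port can extend this list from its own Makefile; for example (hypothetical
# board header, shown only as a sketch):
#   QSTR_GLOBAL_DEPENDENCIES += boards/$(BOARD)/mpconfigboard.h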
# some code is a performance bottleneck and is compiled with different optimisation options
CSUPEROPT = -O3
# this sets the config file for FatFs
CFLAGS_MOD += -DFFCONF_H=\"lib/oofatfs/ffconf.h\"
ifeq ($(MICROPY_PY_USSL),1)
CFLAGS_MOD += -DMICROPY_PY_USSL=1
ifeq ($(MICROPY_SSL_AXTLS),1)
CFLAGS_MOD += -DMICROPY_SSL_AXTLS=1 -I$(TOP)/lib/axtls/ssl -I$(TOP)/lib/axtls/crypto -I$(TOP)/lib/axtls/config
LDFLAGS_MOD += -L$(BUILD) -laxtls
else ifeq ($(MICROPY_SSL_MBEDTLS),1)
# Can be overridden by ports which have "builtin" mbedTLS
MICROPY_SSL_MBEDTLS_INCLUDE ?= $(TOP)/lib/mbedtls/include
CFLAGS_MOD += -DMICROPY_SSL_MBEDTLS=1 -I$(MICROPY_SSL_MBEDTLS_INCLUDE)
LDFLAGS_MOD += -L$(TOP)/lib/mbedtls/library -lmbedx509 -lmbedtls -lmbedcrypto
endif
endif
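# Sketch of how a port might opt in to ussl with mbedTLS (the variable values
# and the include path below are illustrative, not part of this file):
#   MICROPY_PY_USSL = 1
#   MICROPY_SSL_MBEDTLS = 1
#   MICROPY_SSL_MBEDTLS_INCLUDE = $(TOP)/ports/myport/mbedtls/include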
#ifeq ($(MICROPY_PY_LWIP),1)
#CFLAGS_MOD += -DMICROPY_PY_LWIP=1 -I../lib/lwip/src/include -I../lib/lwip/src/include/ipv4 -I../extmod/lwip-include
#endif
ifeq ($(MICROPY_PY_LWIP),1)
LWIP_DIR = lib/lwip/src
INC += -I$(TOP)/lib/lwip/src/include -I$(TOP)/lib/lwip/src/include/ipv4 -I$(TOP)/extmod/lwip-include
CFLAGS_MOD += -DMICROPY_PY_LWIP=1
SRC_MOD += extmod/modlwip.c lib/netutils/netutils.c
SRC_MOD += $(addprefix $(LWIP_DIR)/,\
core/def.c \
core/dns.c \
core/init.c \
core/mem.c \
core/memp.c \
core/netif.c \
core/pbuf.c \
core/raw.c \
core/stats.c \
core/sys.c \
core/tcp.c \
core/tcp_in.c \
core/tcp_out.c \
core/timers.c \
core/udp.c \
core/ipv4/autoip.c \
core/ipv4/icmp.c \
core/ipv4/igmp.c \
core/ipv4/inet.c \
core/ipv4/inet_chksum.c \
core/ipv4/ip_addr.c \
core/ipv4/ip.c \
core/ipv4/ip_frag.c \
)
ifeq ($(MICROPY_PY_LWIP_SLIP),1)
CFLAGS_MOD += -DMICROPY_PY_LWIP_SLIP=1
SRC_MOD += $(LWIP_DIR)/netif/slipif.c
endif
endif
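# Illustrative sketch of a port enabling the lwIP bindings (and optionally SLIP)
# from its own Makefile:
#   MICROPY_PY_LWIP = 1
#   MICROPY_PY_LWIP_SLIP = 1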
ifeq ($(MICROPY_PY_BTREE),1)
BTREE_DIR = lib/berkeley-db-1.xx
BTREE_DEFS = -D__DBINTERFACE_PRIVATE=1 -Dmpool_error=printf -Dabort=abort_ -Dvirt_fd_t=mp_obj_t "-DVIRT_FD_T_HEADER=<py/obj.h>" $(BTREE_DEFS_EXTRA)
INC += -I$(TOP)/$(BTREE_DIR)/PORT/include
SRC_MOD += extmod/modbtree.c
SRC_MOD += $(addprefix $(BTREE_DIR)/,\
btree/bt_close.c \
btree/bt_conv.c \
btree/bt_debug.c \
btree/bt_delete.c \
btree/bt_get.c \
btree/bt_open.c \
btree/bt_overflow.c \
btree/bt_page.c \
btree/bt_put.c \
btree/bt_search.c \
btree/bt_seq.c \
btree/bt_split.c \
btree/bt_utils.c \
mpool/mpool.c \
)
CFLAGS_MOD += -DMICROPY_PY_BTREE=1
# we need to suppress certain warnings to get berkeley-db to compile cleanly
# and we have separate BTREE_DEFS so the definitions don't interfere with other source code
$(BUILD)/$(BTREE_DIR)/%.o: CFLAGS += -Wno-old-style-definition -Wno-sign-compare -Wno-unused-parameter $(BTREE_DEFS)
$(BUILD)/extmod/modbtree.o: CFLAGS += $(BTREE_DEFS)
endif
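# Sketch of enabling btree from a port Makefile; BTREE_DEFS_EXTRA lets a port
# append its own definitions (the page-size override below is only an example):
#   MICROPY_PY_BTREE = 1
#   BTREE_DEFS_EXTRA = -DDEFPSIZE=1024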
# External modules written in C.
ifneq ($(USER_C_MODULES),)
# pre-define USERMOD variables as simply-expanded so that values are expanded
# immediately as they're added to them
SRC_USERMOD :=
CFLAGS_USERMOD :=
LDFLAGS_USERMOD :=
$(foreach module, $(wildcard $(USER_C_MODULES)/*/micropython.mk), \
$(eval USERMOD_DIR = $(patsubst %/,%,$(dir $(module))))\
$(info Including User C Module from $(USERMOD_DIR))\
$(eval include $(module))\
)
SRC_MOD += $(patsubst $(USER_C_MODULES)/%.c,%.c,$(SRC_USERMOD))
CFLAGS_MOD += $(CFLAGS_USERMOD)
LDFLAGS_MOD += $(LDFLAGS_USERMOD)
endif
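# A user C module is a directory containing a micropython.mk that appends to the
# USERMOD variables collected above. A minimal sketch for a hypothetical module
# named "example":
#   # example/micropython.mk
#   EXAMPLE_MOD_DIR := $(USERMOD_DIR)
#   SRC_USERMOD += $(EXAMPLE_MOD_DIR)/modexample.c
#   CFLAGS_USERMOD += -I$(EXAMPLE_MOD_DIR)
# The build is then pointed at the parent directory of such modules, e.g.
#   make USER_C_MODULES=../../usermods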
# py object files
PY_CORE_O_BASENAME = $(addprefix py/,\
mpstate.o \
nlr.o \
nlrx86.o \
nlrx64.o \
nlrthumb.o \
nlrxtensa.o \
nlrsetjmp.o \
malloc.o \
gc.o \
gc_long_lived.o \
pystack.o \
qstr.o \
vstr.o \
mpprint.o \
unicode.o \
mpz.o \
reader.o \
lexer.o \
parse.o \
scope.o \
compile.o \
emitcommon.o \
emitbc.o \
asmbase.o \
asmx64.o \
emitnx64.o \
asmx86.o \
emitnx86.o \
asmthumb.o \
emitnthumb.o \
emitinlinethumb.o \
asmarm.o \
emitnarm.o \
asmxtensa.o \
emitnxtensa.o \
emitinlinextensa.o \
formatfloat.o \
parsenumbase.o \
parsenum.o \
emitglue.o \
persistentcode.o \
runtime.o \
runtime_utils.o \
scheduler.o \
nativeglue.o \
stackctrl.o \
argcheck.o \
warning.o \
map.o \
obj.o \
objarray.o \
objattrtuple.o \
objbool.o \
objboundmeth.o \
objcell.o \
objclosure.o \
objcomplex.o \
objdeque.o \
objdict.o \
objenumerate.o \
objexcept.o \
objfilter.o \
objfloat.o \
objfun.o \
objgenerator.o \
objgetitemiter.o \
objint.o \
objint_longlong.o \
objint_mpz.o \
objlist.o \
objmap.o \
objmodule.o \
objobject.o \
objpolyiter.o \
objproperty.o \
objnone.o \
objnamedtuple.o \
objrange.o \
objreversed.o \
objset.o \
objsingleton.o \
objslice.o \
objstr.o \
objstrunicode.o \
objstringio.o \
objtuple.o \
objtype.o \
objzip.o \
opmethods.o \
proto.o \
reload.o \
sequence.o \
stream.o \
binary.o \
builtinimport.o \
builtinevex.o \
builtinhelp.o \
modarray.o \
modbuiltins.o \
modcollections.o \
modgc.o \
modio.o \
modmath.o \
modcmath.o \
modmicropython.o \
modstruct.o \
modsys.o \
moduerrno.o \
modthread.o \
vm.o \
bc.o \
showbc.o \
repl.o \
smallint.o \
frozenmod.o \
)
PY_EXTMOD_O_BASENAME = \
extmod/moductypes.o \
extmod/modujson.o \
extmod/modure.o \
extmod/moduzlib.o \
extmod/moduheapq.o \
extmod/modutimeq.o \
extmod/moduhashlib.o \
extmod/modubinascii.o \
extmod/virtpin.o \
extmod/modussl_axtls.o \
extmod/modussl_mbedtls.o \
extmod/modurandom.o \
extmod/moduselect.o \
extmod/modwebsocket.o \
extmod/modwebrepl.o \
extmod/modframebuf.o \
extmod/vfs.o \
extmod/vfs_reader.o \
extmod/vfs_posix.o \
extmod/vfs_posix_file.o \
extmod/vfs_fat.o \
extmod/vfs_fat_diskio.o \
extmod/vfs_fat_file.o \
extmod/utime_mphal.o \
extmod/uos_dupterm.o \
lib/embed/abort_.o \
lib/utils/printf.o \
# prepend the build destination prefix to the py object files
PY_CORE_O = $(addprefix $(BUILD)/, $(PY_CORE_O_BASENAME))
PY_EXTMOD_O = $(addprefix $(BUILD)/, $(PY_EXTMOD_O_BASENAME))
# this is a convenience variable for ports that want core, extmod and frozen code
PY_O = $(PY_CORE_O) $(PY_EXTMOD_O)
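# A port's Makefile typically folds this into its own object list; as a sketch
# (OBJ and SRC_C are the port's variables, named here only for illustration):
#   OBJ = $(PY_O) $(addprefix $(BUILD)/, $(SRC_C:.c=.o))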
# object file for frozen files
ifneq ($(FROZEN_DIR),)
PY_O += $(BUILD)/frozen.o
endif
# Combine old singular FROZEN_MPY_DIR with new multiple value form.
FROZEN_MPY_DIRS += $(FROZEN_MPY_DIR)
# object file for frozen bytecode (frozen .mpy files)
ifneq ($(FROZEN_MPY_DIRS),)
PY_O += $(BUILD)/frozen_mpy.o
endif
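# Sketch of how a port selects frozen code (directory names are hypothetical):
#   FROZEN_DIR = scripts        # .py files frozen as source -> frozen.o
#   FROZEN_MPY_DIRS += modules  # .py files frozen as bytecode -> frozen_mpy.o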
# Sources that may contain qstrings
SRC_QSTR_IGNORE = py/nlr%
SRC_QSTR_EMITNATIVE = py/emitn%
SRC_QSTR = $(SRC_MOD) $(filter-out $(SRC_QSTR_IGNORE),$(PY_CORE_O_BASENAME:.o=.c)) $(PY_EXTMOD_O_BASENAME:.o=.c)
# Sources that only hold QSTRs after pre-processing.
SRC_QSTR_PREPROCESSOR = $(addprefix $(TOP)/, $(filter $(SRC_QSTR_EMITNATIVE),$(PY_CORE_O_BASENAME:.o=.c)))
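# Ports append their own qstr-containing sources in the same way, e.g. (SRC_C
# being the port's source list, shown only as a sketch):
#   SRC_QSTR += $(SRC_C)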
# Anything that depends on FORCE will be considered out-of-date
FORCE:
.PHONY: FORCE
$(HEADER_BUILD)/mpversion.h: FORCE | $(HEADER_BUILD)
$(STEPECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeversionhdr.py $@
# build a list of registered modules for py/objmodule.c.
$(HEADER_BUILD)/moduledefs.h: $(SRC_QSTR) $(QSTR_GLOBAL_DEPENDENCIES) | $(HEADER_BUILD)/mpversion.h
	@$(STEPECHO) "GEN $@"
	$(Q)$(PYTHON) $(PY_SRC)/makemoduledefs.py --vpath="., $(TOP), $(USER_C_MODULES)" $(SRC_QSTR) > $@
SRC_QSTR += $(HEADER_BUILD)/moduledefs.h
# mpconfigport.mk is optional, but changes to it may drastically change
# overall config, so they need to be caught
MPCONFIGPORT_MK = $(wildcard mpconfigport.mk)
$(HEADER_BUILD)/$(TRANSLATION).mo: $(TOP)/locale/$(TRANSLATION).po | $(HEADER_BUILD)
	$(Q)msgfmt -o $@ $^
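# The translation is normally chosen on the make command line, assuming a
# matching locale/<lang>.po exists, e.g.:
#   make TRANSLATION=de_DE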
$(HEADER_BUILD)/qstrdefs.preprocessed.h: $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) mpconfigport.h $(MPCONFIGPORT_MK) $(PY_SRC)/mpconfig.h | $(HEADER_BUILD)
	$(STEPECHO) "GEN $@"
	$(Q)cat $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) | $(SED) 's/^Q(.*)/"&"/' | $(CPP) $(CFLAGS) - | $(SED) 's/^"\(Q(.*)\)"/\1/' > $@
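# As an illustration of the quoting above: a line such as
#   Q(hello)
# is first wrapped to "Q(hello)" so the C preprocessor passes it through
# untouched, then unwrapped back to Q(hello) once $(CPP) has run.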
# qstr data
$(HEADER_BUILD)/qstrdefs.enum.h: $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h
	$(STEPECHO) "GEN $@"
	$(Q)$(PYTHON3) $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@
# Adding an order-only dependency on $(HEADER_BUILD) causes $(HEADER_BUILD) to be
# created before we run the script to generate the .h
# Note: we need to protect the qstr names from the preprocessor, so we wrap
# the lines in "" and then unwrap after the preprocessor is finished.
$(HEADER_BUILD)/qstrdefs.generated.h: $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/$(TRANSLATION).mo $(HEADER_BUILD)/qstrdefs.preprocessed.h
	$(STEPECHO) "GEN $@"
	$(Q)$(PYTHON3) $(PY_SRC)/makeqstrdata.py --compression_filename $(HEADER_BUILD)/compression.generated.h --translation $(HEADER_BUILD)/$(TRANSLATION).mo $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@
$(PY_BUILD)/qstr.o: $(HEADER_BUILD)/qstrdefs.generated.h
# Force nlr code to always be compiled with space-saving optimisation so
# that the function preludes are of a minimal and predictable form.
$(PY_BUILD)/nlr%.o: CFLAGS += -Os
# optimising gc for speed; 5ms down to 4ms on pybv2
ifndef SUPEROPT_GC
SUPEROPT_GC = 1
endif
ifeq ($(SUPEROPT_GC),1)
$(PY_BUILD)/gc.o: CFLAGS += $(CSUPEROPT)
endif
# optimising the VM for speed; adds only a small amount to code size but makes a huge difference to speed (20% faster)
ifndef SUPEROPT_VM
SUPEROPT_VM = 1
endif
ifeq ($(SUPEROPT_VM),1)
$(PY_BUILD)/vm.o: CFLAGS += $(CSUPEROPT)
endif
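# Both optimisations can be disabled per build to trade speed for code size,
# e.g. (illustrative command line):
#   make SUPEROPT_GC=0 SUPEROPT_VM=0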
# Optimizing vm.o for modern deeply pipelined CPUs with branch predictors
# may require disabling tail jump optimization. This will make sure that
# each opcode has its own dispatching jump, which will improve branch
# predictor efficiency.
# http://article.gmane.org/gmane.comp.lang.lua.general/75426
# http://hg.python.org/cpython/file/b127046831e2/Python/ceval.c#l828
# http://www.emulators.com/docs/nx25_nostradamus.htm
#-fno-crossjumping
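# A port experimenting with this could append the flag just for vm.o, mirroring
# the target-specific CFLAGS pattern used above (sketch only):
#   $(PY_BUILD)/vm.o: CFLAGS += -fno-crossjumping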