Commit 63b9944382
This commit adds a completely new implementation of the uasyncio module. The aim of this version (compared to the original one in micropython-lib) is to be more compatible with CPython's asyncio module, so that one can more easily write code that runs under both MicroPython and CPython (and reuse CPython asyncio libraries, follow CPython asyncio tutorials, etc). Async code is not easy to write, and any knowledge users already have from CPython asyncio should transfer to uasyncio without effort, and vice versa. The implementation here attempts to provide good compatibility with CPython's asyncio while still being "micro" enough to run where MicroPython runs. This follows the general philosophy of MicroPython itself: to make it feel like Python.

The main change is to use a Task object for each coroutine. This allows more flexibility to queue tasks in various places, e.g. the main run loop, or waiting on events, locks or other tasks. It no longer requires pre-allocating a fixed queue size for the main run loop. A pairing heap is used to queue Tasks.

The module is currently implemented in pure Python, split into components, with lazy importing of optional components. In the future, parts of this implementation can be moved to C to improve speed and reduce memory usage, but the aim is to maintain a pure-Python version as a reference version.
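As an illustration of the compatibility goal (not part of the commit), the following sketch shows the style of code this implementation is meant to support. It is written against the CPython asyncio API and uses only create_task(), sleep() and run(), which this commit provides; under MicroPython, uasyncio is imported in place of asyncio. The coroutine names and delays are made up for the example.

import uasyncio as asyncio  # under CPython: import asyncio

async def blink(name, period):
    # Hypothetical worker: print a message a few times, pausing in between.
    for _ in range(3):
        print(name, "tick")
        await asyncio.sleep(period)

async def main():
    # create_task() wraps each coroutine in a Task object, which is what
    # the scheduler queues (on its pairing heap) alongside tasks waiting
    # on events, locks or other tasks.
    t1 = asyncio.create_task(blink("A", 0.1))
    t2 = asyncio.create_task(blink("B", 0.2))
    await t1
    await t2

asyncio.run(main())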
# MicroPython uasyncio module
# MIT license; Copyright (c) 2019-2020 Damien P. George

from . import core


# Lock class for primitive mutex capability
class Lock:
    def __init__(self):
        # The state can take the following values:
        # - 0: unlocked
        # - 1: locked
        # - <Task>: unlocked but this task has been scheduled to acquire the lock next
        self.state = 0
        # Queue of Tasks waiting to acquire this Lock
        self.waiting = core.TaskQueue()

    def locked(self):
        return self.state == 1

    def release(self):
        if self.state != 1:
            raise RuntimeError
        if self.waiting.peek():
            # Task(s) waiting on lock, schedule next Task
            self.state = self.waiting.pop_head()
            core._task_queue.push_head(self.state)
        else:
            # No Task waiting so unlock
            self.state = 0

    async def acquire(self):
        if self.state != 0:
            # Lock unavailable, put the calling Task on the waiting queue
            self.waiting.push_head(core.cur_task)
            # Set calling task's data to the lock's queue so it can be removed if needed
            core.cur_task.data = self.waiting
            try:
                yield
            except core.CancelledError as er:
                if self.state == core.cur_task:
                    # Cancelled while pending on resume, schedule next waiting Task
                    self.state = 1
                    self.release()
                raise er
        # Lock available, set it as locked
        self.state = 1
        return True

    async def __aenter__(self):
        return await self.acquire()

    async def __aexit__(self, exc_type, exc, tb):
        return self.release()
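As a usage note (not part of the commit), the sketch below takes the Lock via the async context manager protocol, i.e. __aenter__()/__aexit__() calling acquire() and release(); the same code runs under CPython with asyncio.Lock. It assumes the optional gather() component is available (it is lazily imported in this commit), and the names and delay are illustrative.

import uasyncio as asyncio  # under CPython: import asyncio

lock = asyncio.Lock()

async def worker(name):
    # "async with" enters __aenter__/__aexit__, i.e. acquire() and release()
    async with lock:
        print(name, "has the lock")
        await asyncio.sleep(0.1)
    # Lock released here; the next waiting Task (if any) is scheduled

async def main():
    await asyncio.gather(worker("A"), worker("B"))

asyncio.run(main())

Note the design choice in release(): the lock is handed directly to the next waiting Task by storing that Task in self.state and pushing it onto the run queue, so ownership passes without the lock ever appearing free in between; the checks in acquire() against core.cur_task exist to unwind that handoff if the resumed waiter is cancelled before it runs.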