2019-11-13 05:07:58 -05:00
|
|
|
# MicroPython uasyncio module
|
|
|
|
# MIT license; Copyright (c) 2019 Damien P. George
|
|
|
|
|
|
|
|
from time import ticks_ms as ticks, ticks_diff, ticks_add
|
|
|
|
import sys, select
|
|
|
|
|
2020-03-12 01:46:20 -04:00
|
|
|
# Import TaskQueue and Task, preferring built-in C code over Python code
try:
    from _uasyncio import TaskQueue, Task
except ImportError:
    # Narrowed from a bare "except": only a missing/absent C module should
    # trigger the fallback; any other error in _uasyncio must propagate.
    from .task import TaskQueue, Task
|
2019-11-13 05:07:58 -05:00
|
|
|
|
|
|
|
|
|
|
|
################################################################################
|
|
|
|
# Exceptions
|
|
|
|
|
|
|
|
|
|
|
|
class CancelledError(BaseException):
    """Raised inside a task when it is cancelled.

    Derives from BaseException (matching CPython asyncio) so that user code
    catching plain Exception does not accidentally swallow cancellation.
    """

    pass
|
|
|
|
|
|
|
|
|
|
|
|
class TimeoutError(Exception):
    """Raised when an operation exceeds its allotted time (e.g. wait_for)."""

    pass
|
|
|
|
|
|
|
|
|
2020-03-30 02:41:42 -04:00
|
|
|
# Used when calling Loop.call_exception_handler.  A single module-level dict
# is reused for every report to avoid allocating one per uncaught exception;
# "exception" and "future" are filled in just before each call.
_exc_context = {"message": "Task exception wasn't retrieved", "exception": None, "future": None}
|
|
|
|
|
|
|
|
|
2019-11-13 05:07:58 -05:00
|
|
|
################################################################################
|
|
|
|
# Sleep functions
|
|
|
|
|
|
|
|
# "Yield" once, then raise StopIteration
class SingletonGenerator:
    """An awaitable that yields control exactly once, then stops.

    A single instance is reused (see sleep_ms) so that sleeping does not
    allocate on the heap.
    """

    def __init__(self):
        # Wake-up deadline in ticks; None means the yield was already consumed.
        self.state = None
        # One pre-built StopIteration reused for every termination.
        self.exc = StopIteration()

    def __iter__(self):
        return self

    def __next__(self):
        if self.state is None:
            # Second resumption: finish the await with the cached exception
            # (traceback cleared so stale frames are not kept alive).
            self.exc.__traceback__ = None
            raise self.exc
        # First resumption: park the current task on the run queue until the
        # stored deadline, then yield (implicitly returning None).
        _task_queue.push(cur_task, self.state)
        self.state = None
|
|
|
|
|
|
|
|
|
|
|
|
# Pause task execution for the given time (integer in milliseconds, uPy extension)
# Use a SingletonGenerator to do it without allocating on the heap
def sleep_ms(t, sgen=SingletonGenerator()):
    """Return an awaitable that pauses the current task for *t* milliseconds.

    The shared *sgen* default is deliberate: one pre-allocated generator is
    reused for every call so sleeping never allocates.  The assert guards
    against two tasks trying to use it at the same time.
    """
    assert sgen.state is None
    delay = t if t > 0 else 0  # clamp negative delays to zero
    sgen.state = ticks_add(ticks(), delay)
    return sgen
|
|
|
|
|
|
|
|
|
|
|
|
# Pause task execution for the given time (in seconds)
def sleep(t):
    """Sleep for *t* seconds (int or float); delegates to sleep_ms."""
    ms = int(t * 1000)
    return sleep_ms(ms)
|
|
|
|
|
|
|
|
|
|
|
|
################################################################################
|
|
|
|
# Queue and poller for stream IO
|
|
|
|
|
|
|
|
|
|
|
|
class IOQueue:
    """Queue and poller for tasks blocked on stream I/O.

    Each registered stream has up to two waiters: one task waiting to read
    and one waiting to write.  The poller's event mask is kept in sync with
    which waiters are present.
    """

    def __init__(self):
        self.poller = select.poll()
        self.map = {}  # maps id(stream) to [task_waiting_read, task_waiting_write, stream]

    def _enqueue(self, s, idx):
        # Register the current task as waiting on stream s; idx 0 = read,
        # idx 1 = write.  Adjust the poll mask to cover all present waiters.
        if id(s) not in self.map:
            # First waiter on this stream: register with a single-event mask.
            entry = [None, None, s]
            entry[idx] = cur_task
            self.map[id(s)] = entry
            self.poller.register(s, select.POLLIN if idx == 0 else select.POLLOUT)
        else:
            # Second waiter: the other slot must already be occupied and this
            # slot free (a stream never has two readers or two writers).
            sm = self.map[id(s)]
            assert sm[idx] is None
            assert sm[1 - idx] is not None
            sm[idx] = cur_task
            self.poller.modify(s, select.POLLIN | select.POLLOUT)
        # Link task to this IOQueue so it can be removed if needed
        cur_task.data = self

    def _dequeue(self, s):
        # Forget stream s entirely: no tasks wait on it any more.
        del self.map[id(s)]
        self.poller.unregister(s)

    def queue_read(self, s):
        # Block the current task until s is readable (or errors).
        self._enqueue(s, 0)

    def queue_write(self, s):
        # Block the current task until s is writable (or errors).
        self._enqueue(s, 1)

    def remove(self, task):
        # Remove task from this queue, e.g. on cancellation.  The outer loop
        # restarts the scan because _dequeue mutates self.map.
        while True:
            del_s = None
            for k in self.map:  # Iterate without allocating on the heap
                q0, q1, s = self.map[k]
                if q0 is task or q1 is task:
                    del_s = s
                    break
            if del_s is not None:
                # NOTE: s is still bound from the loop above (same stream as
                # del_s); a task occupies at most one slot, so dropping the
                # whole entry is correct here.
                self._dequeue(s)
            else:
                break

    def wait_io_event(self, dt):
        # Poll for up to dt milliseconds and wake tasks whose streams are
        # ready.  (ipoll is a MicroPython extension of select.poll that
        # iterates events without allocating — TODO confirm on target port.)
        for s, ev in self.poller.ipoll(dt):
            sm = self.map[id(s)]
            # print('poll', s, sm, ev)
            # Any event other than pure-POLLOUT (i.e. POLLIN or an error
            # condition) wakes the reader.
            if ev & ~select.POLLOUT and sm[0] is not None:
                # POLLIN or error
                _task_queue.push(sm[0])
                sm[0] = None
            # Any event other than pure-POLLIN wakes the writer.
            if ev & ~select.POLLIN and sm[1] is not None:
                # POLLOUT or error
                _task_queue.push(sm[1])
                sm[1] = None
            # Shrink the poll mask to the remaining waiters (or unregister
            # the stream entirely if none remain).
            if sm[0] is None and sm[1] is None:
                self._dequeue(s)
            elif sm[0] is None:
                self.poller.modify(s, select.POLLOUT)
            else:
                self.poller.modify(s, select.POLLIN)
|
|
|
|
|
|
|
|
|
|
|
|
################################################################################
|
|
|
|
# Main run loop
|
|
|
|
|
|
|
|
# Ensure the awaitable is a task
def _promote_to_task(aw):
    """Return *aw* itself if it is already a Task, else wrap it in one."""
    if isinstance(aw, Task):
        return aw
    return create_task(aw)
|
|
|
|
|
|
|
|
|
|
|
|
# Create and schedule a new task from a coroutine
def create_task(coro):
    """Wrap *coro* in a Task, push it onto the run queue, and return it.

    Raises TypeError if *coro* is not coroutine-like (duck-typed via .send,
    so plain generators are accepted too).
    """
    if not hasattr(coro, "send"):
        raise TypeError("coroutine expected")
    task = Task(coro, globals())
    _task_queue.push(task)
    return task
|
|
|
|
|
|
|
|
|
|
|
|
# Keep scheduling tasks until there are none left to schedule
def run_until_complete(main_task=None):
    """Run the scheduler loop.

    If *main_task* is given, run until it completes and return its result
    (or re-raise its exception).  Otherwise run until no task can ever be
    woken again.  Sets the module-global cur_task to whichever task is
    currently executing.
    """
    global cur_task
    excs_all = (CancelledError, Exception)  # To prevent heap allocation in loop
    excs_stop = (CancelledError, StopIteration)  # To prevent heap allocation in loop
    while True:
        # Wait until the head of _task_queue is ready to run
        dt = 1
        while dt > 0:
            dt = -1
            t = _task_queue.peek()
            if t:
                # A task waiting on _task_queue; "ph_key" is time to schedule task at
                dt = max(0, ticks_diff(t.ph_key, ticks()))
            elif not _io_queue.map:
                # No tasks can be woken so finished running
                return
            # print('(poll {})'.format(dt), len(_io_queue.map))
            # Poll for I/O while waiting (dt == -1 means block indefinitely).
            _io_queue.wait_io_event(dt)

        # Get next task to run and continue it
        t = _task_queue.pop()
        cur_task = t
        try:
            # Continue running the coroutine, it's responsible for rescheduling itself
            exc = t.data
            if not exc:
                t.coro.send(None)
            else:
                # If the task is finished and on the run queue and gets here, then it
                # had an exception and was not await'ed on. Throwing into it now will
                # raise StopIteration and the code below will catch this and run the
                # call_exception_handler function.
                t.data = None
                t.coro.throw(exc)
        except excs_all as er:
            # The task terminated: either it returned (StopIteration), was
            # cancelled (CancelledError), or raised some other Exception.
            # Check the task is not on any event queue
            assert t.data is None
            # This task is done, check if it's the main task and then loop should stop
            if t is main_task:
                if isinstance(er, StopIteration):
                    return er.value
                raise er
            if t.state:
                # Task was running but is now finished.
                waiting = False
                if t.state is True:
                    # "None" indicates that the task is complete and not await'ed on (yet).
                    t.state = None
                elif callable(t.state):
                    # The task has a callback registered to be called on completion.
                    t.state(t, er)
                    t.state = False
                    waiting = True
                else:
                    # Schedule any other tasks waiting on the completion of this task.
                    while t.state.peek():
                        _task_queue.push(t.state.pop())
                        waiting = True
                    # "False" indicates that the task is complete and has been await'ed on.
                    t.state = False
                if not waiting and not isinstance(er, excs_stop):
                    # An exception ended this detached task, so queue it for later
                    # execution to handle the uncaught exception if no other task retrieves
                    # the exception in the meantime (this is handled by Task.throw).
                    _task_queue.push(t)
                # Save return value of coro to pass up to caller.
                t.data = er
            elif t.state is None:
                # Task is already finished and nothing await'ed on the task,
                # so call the exception handler.
                _exc_context["exception"] = exc
                _exc_context["future"] = t
                Loop.call_exception_handler(_exc_context)
|
2019-11-13 05:07:58 -05:00
|
|
|
|
|
|
|
|
|
|
|
# Create a new task from a coroutine and run it until it finishes
def run(coro):
    """Schedule *coro* as the main task and run the loop to completion."""
    main = create_task(coro)
    return run_until_complete(main)
|
|
|
|
|
|
|
|
|
|
|
|
################################################################################
|
|
|
|
# Event loop wrapper
|
|
|
|
|
|
|
|
|
2020-03-29 23:58:13 -04:00
|
|
|
async def _stopper():
    # Do-nothing coroutine used by Loop.run_forever()/Loop.stop(): the loop
    # runs until the task wrapping this coroutine is scheduled and finishes.
    pass
|
|
|
|
|
|
|
|
|
|
|
|
# Task wrapping _stopper(), created by Loop.run_forever(); Loop.stop()
# schedules it (ending run_forever) and resets this back to None.
_stop_task = None
|
|
|
|
|
|
|
|
|
2019-11-13 05:07:58 -05:00
|
|
|
class Loop:
    """Singleton event-loop wrapper providing the CPython asyncio Loop API.

    NOTE: the methods deliberately omit *self* — Loop is never instantiated;
    everything is called as Loop.method(...) on the class itself.  Several
    methods intentionally shadow module-level functions of the same name;
    the bodies resolve those names in the module globals at call time.
    """

    # User-installed exception handler, or None to use the default.
    _exc_handler = None

    def create_task(coro):
        # Delegates to the module-level create_task (resolved at call time).
        return create_task(coro)

    def run_forever():
        # Run until Loop.stop() schedules the _stop_task sentinel.
        global _stop_task
        _stop_task = Task(_stopper(), globals())
        run_until_complete(_stop_task)
        # TODO should keep running until .stop() is called, even if there're no tasks left

    def run_until_complete(aw):
        # Accepts any awaitable; promote to a Task then run the scheduler.
        return run_until_complete(_promote_to_task(aw))

    def stop():
        # Schedule the sentinel task so run_forever returns.
        global _stop_task
        if _stop_task is not None:
            _task_queue.push(_stop_task)
            # If stop() is called again, do nothing
            _stop_task = None

    def close():
        # Provided for API compatibility; nothing to release.
        pass

    def set_exception_handler(handler):
        # handler(loop, context) is called for unretrieved task exceptions.
        Loop._exc_handler = handler

    def get_exception_handler():
        return Loop._exc_handler

    def default_exception_handler(loop, context):
        # Print the context message, the offending task, and its traceback.
        # (sys.print_exception is MicroPython-specific — TODO confirm port.)
        print(context["message"])
        print("future:", context["future"], "coro=", context["future"].coro)
        sys.print_exception(context["exception"])

    def call_exception_handler(context):
        # Use the installed handler if any, else the default one.
        (Loop._exc_handler or Loop.default_exception_handler)(Loop, context)
|
|
|
|
|
2019-11-13 05:07:58 -05:00
|
|
|
|
|
|
|
# The runq_len and waitq_len arguments are for legacy uasyncio compatibility
def get_event_loop(runq_len=0, waitq_len=0):
    """Return the global Loop singleton; both arguments are ignored."""
    return Loop
|
2020-04-08 23:15:47 -04:00
|
|
|
|
|
|
|
|
2021-02-11 22:11:18 -05:00
|
|
|
def current_task():
    """Return the Task the scheduler is currently executing."""
    return cur_task
|
|
|
|
|
|
|
|
|
2020-04-08 23:15:47 -04:00
|
|
|
def new_event_loop():
    """Reset the scheduler's global state and return the Loop singleton."""
    global _task_queue, _io_queue
    _task_queue = TaskQueue()  # fresh TaskQueue of runnable/sleeping Task instances
    _io_queue = IOQueue()  # fresh queue and poller for stream I/O waiters
    return Loop
|
|
|
|
|
|
|
|
|
|
|
|
# Initialise default event loop, so create_task()/run() work straight after
# import without an explicit new_event_loop() call.
new_event_loop()
|