diff --git a/py/scheduler.c b/py/scheduler.c
index d4d7975087..b559091b83 100644
--- a/py/scheduler.c
+++ b/py/scheduler.c
@@ -108,6 +108,7 @@ void mp_sched_lock(void) {
 
 void mp_sched_unlock(void) {
     mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    assert(MP_STATE_VM(sched_state) < 0);
     if (++MP_STATE_VM(sched_state) == 0) {
         // vm became unlocked
         if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL || mp_sched_num_pending()) {
diff --git a/tests/thread/stress_schedule.py b/tests/thread/stress_schedule.py
new file mode 100644
index 0000000000..c5a402b3a3
--- /dev/null
+++ b/tests/thread/stress_schedule.py
@@ -0,0 +1,49 @@
+# This test ensures that the scheduler doesn't trigger any assertions
+# while dealing with concurrent access from multiple threads.
+
+import _thread
+import utime
+import micropython
+import gc
+
+try:
+    micropython.schedule
+except AttributeError:
+    print("SKIP")
+    raise SystemExit
+
+gc.disable()
+
+n = 0  # How many times the task successfully ran.
+
+
+def task(x):
+    global n
+    n += 1
+
+
+def thread():
+    while True:
+        try:
+            micropython.schedule(task, None)
+        except RuntimeError:
+            # Queue full, back off.
+            utime.sleep_ms(10)
+
+
+for i in range(8):
+    _thread.start_new_thread(thread, ())
+
+_NUM_TASKS = const(10000)
+_TIMEOUT_MS = const(10000)
+
+# Wait up to 10 seconds for 10000 tasks to be scheduled.
+t = utime.ticks_ms()
+while n < _NUM_TASKS and utime.ticks_diff(utime.ticks_ms(), t) < _TIMEOUT_MS:
+    pass
+
+if n < _NUM_TASKS:
+    # Not all the tasks were scheduled, likely the scheduler stopped working.
+    print(n)
+else:
+    print("PASS")
diff --git a/tests/thread/stress_schedule.py.exp b/tests/thread/stress_schedule.py.exp
new file mode 100644
index 0000000000..7ef22e9a43
--- /dev/null
+++ b/tests/thread/stress_schedule.py.exp
@@ -0,0 +1 @@
+PASS