run_tests: make access to shared variables thread safe
commit a73f005e00
parent 4767d23db3
@@ -6,6 +6,7 @@ import sys
 import platform
 import argparse
 import re
+import threading
 from glob import glob
 
 # Tests require at least CPython 3.3. If your default python3 executable
@@ -197,13 +198,27 @@ def run_micropython(pyb, args, test_file, is_special=False):
 def run_feature_check(pyb, args, base_path, test_file):
     return run_micropython(pyb, args, base_path + "/feature_check/" + test_file, is_special=True)
 
+class ThreadSafeCounter:
+    def __init__(self, start=0):
+        self._value = start
+        self._lock = threading.Lock()
+
+    def add(self, to_add):
+        with self._lock: self._value += to_add
+
+    def append(self, arg):
+        self.add([arg])
+
+    @property
+    def value(self):
+        return self._value
 
 def run_tests(pyb, tests, args, base_path="."):
-    test_count = 0
-    testcase_count = 0
-    passed_count = 0
-    failed_tests = []
-    skipped_tests = []
+    test_count = ThreadSafeCounter()
+    testcase_count = ThreadSafeCounter()
+    passed_count = ThreadSafeCounter()
+    failed_tests = ThreadSafeCounter([])
+    skipped_tests = ThreadSafeCounter([])
 
     skip_tests = set()
     skip_native = False
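For context (not part of the commit): even under CPython's GIL, a bare "n += 1" on a shared variable is a read-modify-write sequence whose bytecode can interleave between threads, so increments can be lost. A minimal sketch of the class above exercised from several threads; the thread and iteration counts here are arbitrary:

    import threading

    class ThreadSafeCounter:
        # Mirrors the class added in the hunk above.
        def __init__(self, start=0):
            self._value = start
            self._lock = threading.Lock()

        def add(self, to_add):
            with self._lock:
                self._value += to_add

        def append(self, arg):
            self.add([arg])

        @property
        def value(self):
            return self._value

    counter = ThreadSafeCounter()

    def worker():
        for _ in range(10000):
            counter.add(1)  # locked update; no increments are lost

    threads = [threading.Thread(target=worker) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert counter.value == 80000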
@@ -355,8 +370,6 @@ def run_tests(pyb, tests, args, base_path="."):
         skip_tests.add('micropython/schedule.py') # native code doesn't check pending events
 
     def run_one_test(test_file):
-        nonlocal test_count, testcase_count, passed_count, failed_tests
-
         test_file = test_file.replace('\\', '/')
         test_basename = os.path.basename(test_file)
         test_name = os.path.splitext(test_basename)[0]
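Why the nonlocal declaration can go: augmented assignment like test_count += 1 rebinds the name in the enclosing scope, which inside a nested function requires nonlocal, whereas test_count.add(1) merely calls a method on a closed-over object. A self-contained illustration (the Tally class is a stand-in, not from the commit):

    def outer():
        count = 0

        class Tally:  # stand-in for ThreadSafeCounter
            def __init__(self):
                self.n = 0

            def add(self, k):
                self.n += k

        tally = Tally()

        def run_one():
            # count += 1 would raise UnboundLocalError here: augmented
            # assignment rebinds count unless it is declared nonlocal.
            tally.add(1)  # a method call mutates the object; no declaration needed

        run_one()
        return tally.n

    assert outer() == 1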
@@ -417,14 +430,14 @@ def run_tests(pyb, tests, args, base_path="."):
             skipped_tests.append(test_name)
             return
 
-        testcase_count += len(output_expected.splitlines())
+        testcase_count.add(len(output_expected.splitlines()))
 
         filename_expected = test_basename + ".exp"
         filename_mupy = test_basename + ".out"
 
         if output_expected == output_mupy:
             print("pass ", test_file)
-            passed_count += 1
+            passed_count.add(1)
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
@@ -439,18 +452,18 @@ def run_tests(pyb, tests, args, base_path="."):
             print("FAIL ", test_file)
             failed_tests.append(test_name)
 
-        test_count += 1
+        test_count.add(1)
 
     for test_file in tests:
         run_one_test(test_file)
 
-    print("{} tests performed ({} individual testcases)".format(test_count, testcase_count))
-    print("{} tests passed".format(passed_count))
+    print("{} tests performed ({} individual testcases)".format(test_count.value, testcase_count.value))
+    print("{} tests passed".format(passed_count.value))
 
-    if len(skipped_tests) > 0:
-        print("{} tests skipped: {}".format(len(skipped_tests), ' '.join(skipped_tests)))
-    if len(failed_tests) > 0:
-        print("{} tests failed: {}".format(len(failed_tests), ' '.join(failed_tests)))
+    if len(skipped_tests.value) > 0:
+        print("{} tests skipped: {}".format(len(skipped_tests.value), ' '.join(skipped_tests.value)))
+    if len(failed_tests.value) > 0:
+        print("{} tests failed: {}".format(len(failed_tests.value), ' '.join(failed_tests.value)))
         return False
 
     # all tests succeeded
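One detail worth noting: the same class backs both the integer totals (test_count, passed_count) and the list totals (failed_tests, skipped_tests) because += is overloaded per type, and append simply reuses add with a one-element list, so failed_tests.append(name) performs self._value += [name] under the lock. A small illustration of that overloading (the file names here are illustrative):

    count = 0
    count += 1  # integer addition

    names = []
    names += ["basics/int_big.py"]       # list +=: extends the list
    names += ["thread/thread_lock1.py"]
    print(len(names), ' '.join(names))
    # prints: 2 basics/int_big.py thread/thread_lock1.py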