tests/run-perfbench.py: Return error code if any test fails on target.

Signed-off-by: Damien George <damien@micropython.org>
This commit is contained in:
Damien George 2022-05-17 14:00:01 +10:00
parent d7cf8a3b9d
commit 6f68a8c240

View File

@ -88,6 +88,7 @@ def run_benchmark_on_target(target, script):
def run_benchmarks(target, param_n, param_m, n_average, test_list):
    skip_complex = run_feature_test(target, "complex") != "complex"
    skip_native = run_feature_test(target, "native_check") != "native"
    target_had_error = False

    for test_file in sorted(test_list):
        print(test_file + ": ", end="")
@ -147,6 +148,8 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
                error = "FAIL truth"

        if error is not None:
            if not error.startswith("SKIP"):
                target_had_error = True
            print(error)
        else:
            t_avg, t_sd = compute_stats(times)
@ -162,6 +165,8 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
        sys.stdout.flush()

    return target_had_error
def parse_output(filename):
    with open(filename) as f:
@ -279,12 +284,15 @@ def main():
    print("N={} M={} n_average={}".format(N, M, n_average))

    target_had_error = run_benchmarks(target, N, M, n_average, tests)

    if isinstance(target, pyboard.Pyboard):
        target.exit_raw_repl()
        target.close()

    if target_had_error:
        sys.exit(1)
if __name__ == "__main__":
    main()