tests: Rename run-tests to run-tests.py for consistency.
Signed-off-by: Damien George <damien@micropython.org>
commit 6129b8e401
parent b24fcd7aec

@@ -24,7 +24,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_minimal_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 reproducible:
 runs-on: ubuntu-latest
@@ -49,7 +49,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_standard_run_perfbench
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 coverage:
 runs-on: ubuntu-latest
@@ -76,7 +76,7 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 coverage_32bit:
 runs-on: ubuntu-latest
@@ -94,7 +94,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_coverage_32bit_run_native_mpy_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 nanbox:
 runs-on: ubuntu-latest
@@ -108,7 +108,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_nanbox_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 float:
 runs-on: ubuntu-latest
@@ -120,7 +120,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_float_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 stackless_clang:
 runs-on: ubuntu-20.04
@@ -134,7 +134,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_stackless_clang_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 float_clang:
 runs-on: ubuntu-20.04
@@ -148,7 +148,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_float_clang_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 settrace:
 runs-on: ubuntu-latest
@@ -160,7 +160,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_settrace_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 settrace_stackless:
 runs-on: ubuntu-latest
@@ -172,7 +172,7 @@ jobs:
 run: source tools/ci.sh && ci_unix_settrace_stackless_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures

 macos:
 runs-on: macos-11.0
@@ -185,4 +185,4 @@ jobs:
 run: source tools/ci.sh && ci_unix_macos_run_tests
 - name: Print failures
 if: failure()
-run: tests/run-tests --print-failures
+run: tests/run-tests.py --print-failures
@@ -273,7 +273,7 @@ To run a selection of tests on a board/device connected over USB use:
 .. code-block:: bash

 $ cd tests
-$ ./run-tests --target minimal --device /dev/ttyACM0
+$ ./run-tests.py --target minimal --device /dev/ttyACM0

 See also :ref:`writingtests`.

@@ -4,7 +4,7 @@ Writing tests
 =============

 Tests in MicroPython are located at the path ``tests/``. The following is a listing of
-key directories and the run-tests runner script:
+key directories and the run-tests.py runner script:

 .. code-block:: bash

@@ -13,7 +13,7 @@ key directories and the run-tests runner script:
 ├── extmod
 ├── float
 ├── micropython
-├── run-tests
+├── run-tests.py
 ...

 There are subfolders maintained to categorize the tests. Add a test by creating a new file in one of the
@@ -54,17 +54,17 @@ The other way to run tests, which is useful when running on targets other than t
 .. code-block:: bash

 $ cd tests
-$ ./run-tests
+$ ./run-tests.py

 Then to run on a board:

 .. code-block:: bash

-$ ./run-tests --target minimal --device /dev/ttyACM0
+$ ./run-tests.py --target minimal --device /dev/ttyACM0

 And to run only a certain set of tests (eg a directory):

 .. code-block:: bash

-$ ./run-tests -d basics
-$ ./run-tests float/builtin*.py
+$ ./run-tests.py -d basics
+$ ./run-tests.py float/builtin*.py
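
Note on the documentation above: a new test is just a Python file dropped into one of the category directories; run-tests.py executes it under MicroPython and, unless a matching .exp file exists, also under CPython, and compares the two outputs. A minimal sketch of such a test (the file name is hypothetical and not part of this commit):

    # tests/basics/int_add_example.py -- hypothetical example test.
    # Its printed output must be identical under MicroPython and CPython.
    print(1 + 1)
    print(int("42") + 8)
    print(sorted([3, 1, 2]))
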
@@ -6,7 +6,7 @@ import os
 """
 Execute it like this:

-python3 run-tests --target wipy --device 192.168.1.1 ../cc3200/tools/smoke.py
+python3 run-tests.py --target wipy --device 192.168.1.1 ../cc3200/tools/smoke.py
 """

 pin_map = [23, 24, 11, 12, 13, 14, 15, 16, 17, 22, 28, 10, 9, 8, 7, 6, 30, 31, 3, 0, 4, 5]
@@ -63,8 +63,8 @@ $(BUILD)/micropython.js: $(OBJ) library.js wrapper.js
 min: $(BUILD)/micropython.js
 uglifyjs $< -c -o $(BUILD)/micropython.min.js

-test: $(BUILD)/micropython.js $(TOP)/tests/run-tests
+test: $(BUILD)/micropython.js $(TOP)/tests/run-tests.py
 $(eval DIRNAME=ports/$(notdir $(CURDIR)))
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../ports/javascript/node_run.sh ./run-tests
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../ports/javascript/node_run.sh ./run-tests.py

 include $(TOP)/py/mkrules.mk
@@ -10,7 +10,7 @@ CFLAGS += -DTEST

 $(BUILD)/test_main.o: $(BUILD)/genhdr/tests.h
 $(BUILD)/genhdr/tests.h:
-(cd $(TOP)/tests; ./run-tests --target=qemu-arm --write-exp)
+(cd $(TOP)/tests; ./run-tests.py --target=qemu-arm --write-exp)
 $(Q)echo "Generating $@";(cd $(TOP)/tests; ../tools/tinytest-codegen.py) > $@

 $(BUILD)/lib/tinytest/tinytest.o: CFLAGS += -DNO_FORKING
@@ -292,17 +292,17 @@ include $(TOP)/py/mkrules.mk

 .PHONY: test test_full

-test: $(PROG) $(TOP)/tests/run-tests
+test: $(PROG) $(TOP)/tests/run-tests.py
 $(eval DIRNAME=ports/$(notdir $(CURDIR)))
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests.py

-test_full: $(PROG) $(TOP)/tests/run-tests
+test_full: $(PROG) $(TOP)/tests/run-tests.py
 $(eval DIRNAME=ports/$(notdir $(CURDIR)))
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests -d thread
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests --emit native
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests --via-mpy $(RUN_TESTS_MPY_CROSS_FLAGS) -d basics float micropython
-cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests --via-mpy $(RUN_TESTS_MPY_CROSS_FLAGS) --emit native -d basics float micropython
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests.py
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests.py -d thread
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests.py --emit native
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests.py --via-mpy $(RUN_TESTS_MPY_CROSS_FLAGS) -d basics float micropython
+cd $(TOP)/tests && MICROPY_MICROPYTHON=../$(DIRNAME)/$(PROG) ./run-tests.py --via-mpy $(RUN_TESTS_MPY_CROSS_FLAGS) --emit native -d basics float micropython
 cat $(TOP)/tests/basics/0prelim.py | ./$(PROG) | grep -q 'abc'

 test_gcov: test_full
@@ -37,14 +37,14 @@ build:
 test_script:
 - ps: |
 cd (Join-Path $env:APPVEYOR_BUILD_FOLDER 'tests')
-& $env:MICROPY_CPYTHON3 run-tests
+& $env:MICROPY_CPYTHON3 run-tests.py
 if ($LASTEXITCODE -ne 0) {
-& $env:MICROPY_CPYTHON3 run-tests --print-failures
+& $env:MICROPY_CPYTHON3 run-tests.py --print-failures
 throw "Test failure"
 }
-& $env:MICROPY_CPYTHON3 run-tests --via-mpy -d basics float micropython
+& $env:MICROPY_CPYTHON3 run-tests.py --via-mpy -d basics float micropython
 if ($LASTEXITCODE -ne 0) {
-& $env:MICROPY_CPYTHON3 run-tests --print-failures
+& $env:MICROPY_CPYTHON3 run-tests.py --print-failures
 throw "Test failure"
 }

@@ -71,17 +71,17 @@ after_test:
 throw "$env:MSYSTEM mpy_cross build exited with code $LASTEXITCODE"
 }
 cd (Join-Path $env:APPVEYOR_BUILD_FOLDER 'tests')
-$testArgs = @('run-tests')
+$testArgs = @('run-tests.py')
 foreach ($skipTest in @('math_fun', 'float2int_double', 'float_parse', 'math_domain_special')) {
 $testArgs = $testArgs + '-e' + $skipTest
 }
 & $env:MICROPY_CPYTHON3 $testArgs
 if ($LASTEXITCODE -ne 0) {
-& $env:MICROPY_CPYTHON3 run-tests --print-failures
+& $env:MICROPY_CPYTHON3 run-tests.py --print-failures
 throw "Test failure"
 }
 & $env:MICROPY_CPYTHON3 ($testArgs + @('--via-mpy', '-d', 'basics', 'float', 'micropython'))
 if ($LASTEXITCODE -ne 0) {
-& $env:MICROPY_CPYTHON3 run-tests --print-failures
+& $env:MICROPY_CPYTHON3 run-tests.py --print-failures
 throw "Test failure"
 }
@@ -90,11 +90,11 @@ Running the tests
 This is similar for all ports:

 cd ../../tests
-python ./run-tests
+python ./run-tests.py

 Though when running on Cygwin and using Cygwin's Python installation you'll need:

-python3 ./run-tests
+python3 ./run-tests.py

 Depending on the combination of platform and Python version used it might be
 needed to first set the MICROPY_MICROPYTHON environment variable to
@@ -8,8 +8,8 @@
 # ./make-bin-testsuite BOARD=qemu_cortex_m3 run
 #

-(cd ../../tests; ./run-tests --write-exp)
-(cd ../../tests; ./run-tests --list-tests --target=minimal \
+(cd ../../tests; ./run-tests.py --write-exp)
+(cd ../../tests; ./run-tests.py --list-tests --target=minimal \
 -e async -e intbig -e int_big -e builtin_help -e memstats -e bytes_compare3 -e class_reverse_op \
 -e /set -e frozenset -e complex -e const -e native -e viper \
 -e 'float_divmod\.' -e float_parse_doubleprec -e float/true_value -e float/types \
@@ -1,15 +1,15 @@
 This directory contains tests for various functionality areas of MicroPython.
-To run all stable tests, run "run-tests" script in this directory.
+To run all stable tests, run "run-tests.py" script in this directory.

 Tests of capabilities not supported on all platforms should be written
 to check for the capability being present. If it is not, the test
 should merely output 'SKIP' followed by the line terminator, and call
 sys.exit() to raise SystemExit, instead of attempting to test the
-missing capability. The testing framework (run-tests in this
+missing capability. The testing framework (run-tests.py in this
 directory, test_main.c in qemu_arm) recognizes this as a skipped test.

 There are a few features for which this mechanism cannot be used to
-condition a test. The run-tests script uses small scripts in the
+condition a test. The run-tests.py script uses small scripts in the
 feature_check directory to check whether each such feature is present,
 and skips the relevant tests if not.

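
The README above defines the skip convention: when a capability is missing, a test prints SKIP and raises SystemExit, and run-tests.py records it as skipped rather than failed. A minimal sketch of a test using that convention (hypothetical file, not part of this commit):

    # Hypothetical capability-gated test following the SKIP convention.
    import sys

    try:
        frozenset  # capability check: frozenset can be compiled out
    except NameError:
        print("SKIP")
        sys.exit()  # raises SystemExit -> recorded as a skip, not a failure

    print(len(frozenset([1, 2, 2, 3])))
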
@@ -95,7 +95,7 @@ def copy_partition(src, dest):
 if sz - addr < 4096:
 blk = blk[: sz - addr]
 if addr & 0xFFFF == 0:
-# need to show progress to run-tests else it times out
+# need to show progress to run-tests.py else it times out
 print(" ... 0x{:06x}".format(addr))
 src.readblocks(addr >> 12, blk)
 dest.writeblocks(addr >> 12, blk)
@@ -1,4 +1,4 @@
 This directory doesn't contain real tests, but code snippets to detect
 various interpreter features, which can't be/inconvenient to detecte by
-other means. Scripts here are executed by run-tests at the beginning of
+other means. Scripts here are executed by run-tests.py at the beginning of
 testsuite to decide what other test groups to run/exclude.
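
For illustration, a probe in the spirit of the feature_check scripts described above (a sketch only; the real scripts in tests/feature_check/ may differ): run-tests.py executes it before the test run and keys off the printed output to decide which test groups to exclude.

    # Hypothetical feature probe: reports whether the build has complex numbers.
    try:
        complex  # NameError on builds without complex support
        print("complex")
    except NameError:
        print("no complex")
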
@@ -8,5 +8,5 @@ of JVM.

 For example, for OpenJDK 7 on x86_64, following may work:

-LD_LIBRARY_PATH=/usr/lib/jvm/java-7-openjdk-amd64/jre/lib/amd64/server ./run-tests jni/*.py
+LD_LIBRARY_PATH=/usr/lib/jvm/java-7-openjdk-amd64/jre/lib/amd64/server ./run-tests.py jni/*.py

@@ -8,4 +8,4 @@ not yet fully correspond to the functional specification above.
 So far, these tests are not run as part of the main testsuite and need
 to be run seperately (from the main test/ directory):

-./run-tests net_hosted/*.py
+./run-tests.py net_hosted/*.py
@@ -2,4 +2,4 @@ This directory contains network tests which require Internet connection.
 Note that these tests are not run as part of the main testsuite and need
 to be run seperately (from the main test/ directory):

-./run-tests net_inet/*.py
+./run-tests.py net_inet/*.py
@@ -67,7 +67,7 @@ def main():
 cmd_parser.add_argument("files", nargs="*", help="input test files")
 args = cmd_parser.parse_args()

-# Note pyboard support is copied over from run-tests, not testes, and likely needs revamping
+# Note pyboard support is copied over from run-tests.py, not tests, and likely needs revamping
 if args.pyboard:
 import pyboard

@@ -1,6 +1,6 @@
 #
-# This is minimal MicroPython variant of run-tests script, which uses
-# .exp files as generated by run-tests --write-exp. It is useful to run
+# This is minimal MicroPython variant of run-tests.py script, which uses
+# .exp files as generated by run-tests.py --write-exp. It is useful to run
 # testsuite on systems which have neither CPython3 nor unix shell.
 # This script is intended to be run by the same interpreter executable
 # which is to be tested, so should use minimal language functionality.
@@ -1,7 +1,7 @@
 #!/bin/sh
 #
-# This is plain shell variant of run-tests script, which uses .exp files
-# as generated by run-tests --write-exp. It is useful to run testsuite
+# This is plain shell variant of run-tests.py script, which uses .exp files
+# as generated by run-tests.py --write-exp. It is useful to run testsuite
 # on embedded systems which don't have CPython3.
 #

@@ -258,7 +258,7 @@ def run_tests(pyb, tests, args, result_dir):
 if not (args.list_tests or args.write_exp):
 # Even if we run completely different tests in a different directory,
 # we need to access feature_checks from the same directory as the
-# run-tests script itself so use base_path.
+# run-tests.py script itself so use base_path.

 # Check if micropython.native is supported, and skip such tests if it's not
 output = run_feature_check(pyb, args, base_path, 'native_check.py')
@@ -582,19 +582,19 @@ Tests are discovered by scanning test directories for .py files or using the
 specified test files. If test files nor directories are specified, the script
 expects to be ran in the tests directory (where this file is located) and the
 builtin tests suitable for the target platform are ran.
-When running tests, run-tests compares the MicroPython output of the test with the output
+When running tests, run-tests.py compares the MicroPython output of the test with the output
 produced by running the test through CPython unless a <test>.exp file is found, in which
 case it is used as comparison.
-If a test fails, run-tests produces a pair of <test>.out and <test>.exp files in the result
+If a test fails, run-tests.py produces a pair of <test>.out and <test>.exp files in the result
 directory with the MicroPython output and the expectations, respectively.
 ''',
 epilog='''\
 Options -i and -e can be multiple and processed in the order given. Regex
 "search" (vs "match") operation is used. An action (include/exclude) of
 the last matching regex is used:
-run-tests -i async - exclude all, then include tests containing "async" anywhere
-run-tests -e '/big.+int' - include all, then exclude by regex
-run-tests -e async -i async_foo - include all, exclude async, yet still include async_foo
+run-tests.py -i async - exclude all, then include tests containing "async" anywhere
+run-tests.py -e '/big.+int' - include all, then exclude by regex
+run-tests.py -e async -i async_foo - include all, exclude async, yet still include async_foo
 ''')
 cmd_parser.add_argument('--target', default='unix', help='the target platform')
 cmd_parser.add_argument('--device', default='/dev/ttyACM0', help='the serial device or the IP address of the pyboard')
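
A simplified sketch of the pass/fail behaviour the help text above describes (this is not the actual run-tests.py implementation; the function name and result-file layout are assumptions): the expected output comes from CPython or a <test>.exp file, and on a mismatch a .out/.exp pair is left behind for --print-failures to display.

    # Simplified sketch only, assuming a unix micropython binary is available.
    import subprocess

    def check_test(test_path, expected_output, micropython="../ports/unix/micropython"):
        actual = subprocess.run([micropython, test_path], capture_output=True).stdout
        if actual == expected_output:
            return True
        # Keep the mismatching outputs side by side for later inspection.
        with open(test_path + ".out", "wb") as f:
            f.write(actual)
        with open(test_path + ".exp", "wb") as f:
            f.write(expected_output)
        return False
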
@@ -306,7 +306,7 @@ function ci_unix_minimal_build {
 }

 function ci_unix_minimal_run_tests {
-(cd tests && MICROPY_CPYTHON3=python3 MICROPY_MICROPYTHON=../ports/unix/micropython-minimal ./run-tests -e exception_chain -e self_type_check -e subclass_native_init -d basics)
+(cd tests && MICROPY_CPYTHON3=python3 MICROPY_MICROPYTHON=../ports/unix/micropython-minimal ./run-tests.py -e exception_chain -e self_type_check -e subclass_native_init -d basics)
 }

 function ci_unix_standard_build {
@@ -441,7 +441,7 @@ function ci_unix_macos_run_tests {
 # - OSX has poor time resolution and these uasyncio tests do not have correct output
 # - import_pkg7 has a problem with relative imports
 # - urandom_basic has a problem with getrandbits(0)
-(cd tests && ./run-tests --exclude 'uasyncio_(basic|heaplock|lock|wait_task)' --exclude 'import_pkg7.py' --exclude 'urandom_basic.py')
+(cd tests && ./run-tests.py --exclude 'uasyncio_(basic|heaplock|lock|wait_task)' --exclude 'import_pkg7.py' --exclude 'urandom_basic.py')
 }

 ########################################################################################