Fix CCTRL tests on CI - CCTRL cluster is on endpoint 1 (project-chip#36874)

* Fix CCTRL tests on CI - CCTRL cluster is on endpoint 1

* Fail CI tests if any test cases were skipped

* Add compatibility flag to mobile-device-test.py

* Comment out skipped tests

* Change executable mode

* Improve --test-case command line option

* Disable skipped test cases on TC_SWTCH

* Disable TC_LVL_2_3 on CI

* Make TestBdxTransfer quiet

* Verify testing support before running actual tests

* Add missing definition
arkq authored Dec 19, 2024
1 parent 204fb27 commit ddc48d9
Showing 14 changed files with 85 additions and 70 deletions.
32 changes: 17 additions & 15 deletions .github/workflows/tests.yaml
@@ -520,24 +520,26 @@ jobs:
echo "TRACE_TEST_JSON: out/trace_data/test-{SCRIPT_BASE_NAME}" >> /tmp/test_env.yaml
echo "TRACE_TEST_PERFETTO: out/trace_data/test-{SCRIPT_BASE_NAME}" >> /tmp/test_env.yaml
- name: Verify Testing Support
run: |
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/test_testing/test_IDM_10_4.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/test_testing/test_TC_ICDM_2_1.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/test_testing/test_TC_SC_7_1.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/test_testing/TestDecorators.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestChoiceConformanceSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestConformanceSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestConformanceTest.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestIdChecks.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestMatterTestingSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestSpecParsingDeviceType.py'
scripts/run_in_python_env.sh out/venv 'python3 src/python_testing/TestSpecParsingSupport.py'
- name: Run Tests
run: |
mkdir -p out/trace_data
scripts/run_in_python_env.sh out/venv './scripts/tests/run_python_test.py --load-from-env /tmp/test_env.yaml --script src/controller/python/test/test_scripts/mobile-device-test.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/execute_python_tests.py --env-file /tmp/test_env.yaml --search-directory src/python_testing'
scripts/run_in_python_env.sh out/venv './scripts/tests/TestTimeSyncTrustedTimeSourceRunner.py --all-clusters out/linux-x64-all-clusters-ipv6only-no-ble-no-wifi-tsan-clang-test/chip-all-clusters-app'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestIdChecks.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestSpecParsingDeviceType.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestConformanceSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestConformanceTest.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestChoiceConformanceSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestMatterTestingSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/TestSpecParsingSupport.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/test_TC_ICDM_2_1.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/test_IDM_10_4.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/test_TC_SC_7_1.py'
scripts/run_in_python_env.sh out/venv 'python3 ./src/python_testing/test_testing/TestDecorators.py'
scripts/run_in_python_env.sh out/venv 'scripts/tests/run_python_test.py --load-from-env /tmp/test_env.yaml --script src/controller/python/test/test_scripts/mobile-device-test.py'
scripts/run_in_python_env.sh out/venv 'src/python_testing/execute_python_tests.py --env-file /tmp/test_env.yaml --search-directory src/python_testing'
scripts/run_in_python_env.sh out/venv 'scripts/tests/TestTimeSyncTrustedTimeSourceRunner.py --all-clusters out/linux-x64-all-clusters-ipv6only-no-ble-no-wifi-tsan-clang-test/chip-all-clusters-app'
- name: Uploading core files
uses: actions/upload-artifact@v4
9 changes: 7 additions & 2 deletions scripts/tests/run_python_test.py
@@ -215,8 +215,13 @@ def main_impl(app: str, factory_reset: bool, factory_reset_app_only: bool, app_a
app_process.p.stdin.close()
app_pid = app_process.p.pid

script_command = [script, "--paa-trust-store-path", os.path.join(DEFAULT_CHIP_ROOT, MATTER_DEVELOPMENT_PAA_ROOT_CERTS),
'--log-format', '%(message)s', "--app-pid", str(app_pid)] + shlex.split(script_args)
script_command = [
script,
"--fail-on-skipped",
"--paa-trust-store-path", os.path.join(DEFAULT_CHIP_ROOT, MATTER_DEVELOPMENT_PAA_ROOT_CERTS),
"--log-format", '%(message)s',
"--app-pid", str(app_pid),
] + shlex.split(script_args)

if script_gdb:
#
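The change above rebuilds `script_command` as an explicit argv list and forwards the new `--fail-on-skipped` flag to every script launched through `run_python_test.py`. A minimal, standalone sketch of that assembly pattern (illustrative only, not the project's code): fixed arguments stay in a Python list, while the free-form script-arguments string is tokenised with `shlex.split` so quoting behaves as it would in a shell.

```python
# Illustrative sketch only: how an argv list plus shlex.split composes the
# final command. Paths and arguments here are examples, not the CI values.
import shlex

script = "src/controller/python/test/test_scripts/mobile-device-test.py"
script_args = "--discriminator 1234 --log-format '%(message)s'"

script_command = [script, "--fail-on-skipped"] + shlex.split(script_args)
print(script_command)
# ['src/controller/python/test/test_scripts/mobile-device-test.py',
#  '--fail-on-skipped', '--discriminator', '1234',
#  '--log-format', '%(message)s']
```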
src/controller/python/test/test_scripts/mobile-device-test.py
@@ -266,8 +266,11 @@ def do_tests(controller_nodeid, device_nodeid, address, timeout, discriminator,
type=int,
default=0,
help="The PID of the app against which the test is going to run")
@click.option('--fail-on-skipped',
is_flag=True,
help="Fail the test if any test cases are skipped")
def run(controller_nodeid, device_nodeid, address, timeout, discriminator, setup_pin, enable_test, disable_test, log_level,
log_format, print_test_list, paa_trust_store_path, trace_to, app_pid):
log_format, print_test_list, paa_trust_store_path, trace_to, app_pid, fail_on_skipped):
coloredlogs.install(level=log_level, fmt=log_format, logger=logger)

if print_test_list:
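For reference, a minimal standalone click sketch (assuming the `click` package; this is not mobile-device-test.py itself) of how an `is_flag` option such as `--fail-on-skipped` arrives in the command function as a plain boolean. The hunk above only shows the option declaration, not how the script ultimately acts on it.

```python
# Hypothetical, self-contained example of a click is_flag option; the real
# handling inside mobile-device-test.py is not shown in the hunk above.
import click

@click.command()
@click.option('--fail-on-skipped', is_flag=True,
              help="Fail the test if any test cases are skipped")
def run(fail_on_skipped: bool) -> None:
    # is_flag options default to False and become True when the flag is passed.
    click.echo(f"fail_on_skipped={fail_on_skipped}")

if __name__ == "__main__":
    run()
```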
4 changes: 2 additions & 2 deletions src/python_testing/TC_CCTRL_2_1.py
@@ -31,7 +31,7 @@
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 0
# --endpoint 1
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
# factory-reset: true
@@ -46,7 +46,7 @@
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 0
# --endpoint 1
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
# factory-reset: true
4 changes: 2 additions & 2 deletions src/python_testing/TC_CCTRL_2_2.py
@@ -31,7 +31,7 @@
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 0
# --endpoint 1
# --string-arg th_server_app_path:${ALL_CLUSTERS_APP}
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
@@ -47,7 +47,7 @@
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 0
# --endpoint 1
# --string-arg th_server_app_path:${ALL_CLUSTERS_APP}
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
4 changes: 2 additions & 2 deletions src/python_testing/TC_CCTRL_2_3.py
@@ -31,7 +31,7 @@
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 0
# --endpoint 1
# --string-arg th_server_app_path:${ALL_CLUSTERS_APP}
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
@@ -47,7 +47,7 @@
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 0
# --endpoint 1
# --string-arg th_server_app_path:${ALL_CLUSTERS_APP}
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
29 changes: 14 additions & 15 deletions src/python_testing/TC_DeviceBasicComposition.py
@@ -189,7 +189,6 @@
find_tag_list_problems, find_tree_roots, flat_list_ok,
get_direct_children_of_root, parts_list_cycles, separate_endpoint_types)
from chip.tlv import uint
from mobly import asserts


def get_vendor_id(mei: int) -> int:
@@ -692,25 +691,25 @@ def test_TC_IDM_11_1(self):
if not success:
self.fail_current_test("At least one attribute string was not valid UTF-8")

def test_all_event_strings_valid(self):
asserts.skip("TODO: Validate every string in the read events is valid UTF-8 and has no nulls")
# def test_all_event_strings_valid(self):
# asserts.skip("TODO: Validate every string in the read events is valid UTF-8 and has no nulls")

def test_all_schema_scalars(self):
asserts.skip("TODO: Validate all int/uint are in range of the schema (or null if nullable) for known attributes")
# def test_all_schema_scalars(self):
# asserts.skip("TODO: Validate all int/uint are in range of the schema (or null if nullable) for known attributes")

def test_all_commands_reported_are_executable(self):
asserts.skip("TODO: Validate all commands reported in AcceptedCommandList are actually executable")
# def test_all_commands_reported_are_executable(self):
# asserts.skip("TODO: Validate all commands reported in AcceptedCommandList are actually executable")

def test_dump_all_pics_for_all_endpoints(self):
asserts.skip("TODO: Make a test that generates the basic PICS list for each endpoint based on actually reported contents")
# def test_dump_all_pics_for_all_endpoints(self):
# asserts.skip("TODO: Make a test that generates the basic PICS list for each endpoint based on actually reported contents")

def test_all_schema_mandatory_elements_present(self):
asserts.skip(
"TODO: Make a test that ensures every known cluster has the mandatory elements present (commands, attributes) based on features")
# def test_all_schema_mandatory_elements_present(self):
# asserts.skip(
# "TODO: Make a test that ensures every known cluster has the mandatory elements present (commands, attributes) based on features")

def test_all_endpoints_have_valid_composition(self):
asserts.skip(
"TODO: Make a test that verifies each endpoint has valid set of device types, and that the device type conformance is respected for each")
# def test_all_endpoints_have_valid_composition(self):
# asserts.skip(
# "TODO: Make a test that verifies each endpoint has valid set of device types, and that the device type conformance is respected for each")

def test_TC_SM_1_2(self):
self.print_step(1, "Wildcard read of device - already done")
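The placeholder tests above previously called `asserts.skip(...)`, which records them as skipped; now that skipped cases can fail CI, they are commented out instead of being left to skip. A small illustration of why a skip signal is no longer harmless (assumes mobly is installed; this snippet is not part of the test suite).

```python
# Illustrative only: asserts.skip() raises a TestSkip signal, which the runner
# records as a skipped case. With the new fail-on-skipped behaviour such a
# case would turn the whole CI run red, so the placeholders are disabled.
from mobly import asserts, signals

try:
    asserts.skip("TODO: placeholder test not implemented yet")
except signals.TestSkip as skip_signal:
    print(f"would be recorded as skipped: {skip_signal}")
```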
1 change: 1 addition & 0 deletions src/python_testing/TC_LVL_2_3.py
@@ -18,6 +18,7 @@
# See https://github.com/project-chip/connectedhomeip/blob/master/docs/testing/python.md#defining-the-ci-test-arguments
# for details about the block below.
#
# FIXME: https://github.com/project-chip/connectedhomeip/issues/36885
# === BEGIN CI TEST ARGUMENTS ===
# test-runner-runs:
# run1:
28 changes: 12 additions & 16 deletions src/python_testing/TC_SWTCH.py
@@ -17,12 +17,15 @@
# See https://github.com/project-chip/connectedhomeip/blob/master/docs/testing/python.md#defining-the-ci-test-arguments
# for details about the block below.
#
# TODO: https://github.com/project-chip/connectedhomeip/issues/36884
#
# === BEGIN CI TEST ARGUMENTS ===
# test-runner-runs:
# run1:
# app: ${ALL_CLUSTERS_APP}
# app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json
# script-args: >
# --test-case test_TC_SWTCH_2_2
# --endpoint 1
# --storage-path admin_storage.json
# --commissioning-method on-network
@@ -37,20 +40,9 @@
# app: ${ALL_CLUSTERS_APP}
# app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json
# script-args: >
# --endpoint 2
# --storage-path admin_storage.json
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
# --PICS src/app/tests/suites/certification/ci-pics-values
# factory-reset: true
# quiet: true
# run3:
# app: ${ALL_CLUSTERS_APP}
# app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json
# script-args: >
# --test-case test_TC_SWTCH_2_3
# --test-case test_TC_SWTCH_2_4
# --test-case test_TC_SWTCH_2_6
# --endpoint 3
# --storage-path admin_storage.json
# --commissioning-method on-network
@@ -61,10 +53,13 @@
# --PICS src/app/tests/suites/certification/ci-pics-values
# factory-reset: true
# quiet: true
# run4:
# run3:
# app: ${ALL_CLUSTERS_APP}
# app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json
# script-args: >
# --test-case test_TC_SWTCH_2_3
# --test-case test_TC_SWTCH_2_4
# --test-case test_TC_SWTCH_2_5
# --endpoint 4
# --storage-path admin_storage.json
# --commissioning-method on-network
@@ -281,7 +276,8 @@ def _expect_no_events_for_cluster(self, event_queue: queue.Queue, endpoint_id: i
elapsed = 0.0
time_remaining = timeout_sec

logging.info(f"Waiting {timeout_sec:.1f} seconds for no more events for cluster {expected_cluster} on endpoint {endpoint_id}")
logging.info(f"Waiting {timeout_sec:.1f} seconds for no more events for "
f"cluster {expected_cluster} on endpoint {endpoint_id}")
while time_remaining > 0:
try:
item: EventReadResult = event_queue.get(block=True, timeout=time_remaining)
2 changes: 1 addition & 1 deletion src/python_testing/TestBdxTransfer.py
@@ -32,7 +32,7 @@
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
# factory-reset: true
# quiet: false
# quiet: true
# === END CI TEST ARGUMENTS ===

import asyncio
3 changes: 3 additions & 0 deletions src/python_testing/TestMatterTestingSupport.py
@@ -640,6 +640,8 @@ def test_xml_pics(self):

def test_parse_matter_test_args(self):
args = [
# Verify that it is possible to pass multiple test cases at once
"--tests", "TC_1", "TC_2",
# Verify that values are appended to a single argument
"--int-arg", "PIXIT.TEST.DEC:42",
"--int-arg", "PIXIT.TEST.HEX:0x1234",
@@ -650,6 +652,7 @@
]

parsed = parse_matter_test_args(args)
asserts.assert_equal(parsed.tests, ["TC_1", "TC_2"])
asserts.assert_equal(parsed.global_test_params.get("PIXIT.TEST.DEC"), 42)
asserts.assert_equal(parsed.global_test_params.get("PIXIT.TEST.HEX"), 0x1234)
asserts.assert_equal(parsed.global_test_params.get("PIXIT.TEST.STR.MULTI.1"), "foo")
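The new assertions above exercise the reworked `--tests` / `--test-case` option, which now uses `action='append'` with `nargs='+'` and is flattened with `itertools.chain.from_iterable`. A self-contained argparse sketch of that behaviour (illustrative only, not the project's parser):

```python
# Demonstrates how repeated --tests/--test-case groups parse into a nested
# list that chain.from_iterable flattens, mirroring the matter_testing change.
import argparse
from itertools import chain

parser = argparse.ArgumentParser()
parser.add_argument('--tests', '--test-case', action='append', nargs='+',
                    type=str, metavar='test_NAME',
                    help='A list of tests in the test class to execute.')

args = parser.parse_args(['--tests', 'TC_1', 'TC_2', '--test-case', 'TC_3'])
print(args.tests)                                    # [['TC_1', 'TC_2'], ['TC_3']]
print(list(chain.from_iterable(args.tests or [])))   # ['TC_1', 'TC_2', 'TC_3']
```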
1 change: 1 addition & 0 deletions src/python_testing/execute_python_tests.py
100644 → 100755
@@ -1,3 +1,4 @@
#!/usr/bin/env -S python3 -B
#
# Copyright (c) 2024 Project CHIP Authors
# All rights reserved.
@@ -628,6 +628,7 @@ class MatterTestConfig:
timeout: typing.Union[int, None] = None
endpoint: typing.Union[int, None] = 0
app_pid: int = 0
fail_on_skipped_tests: bool = False

commissioning_method: Optional[str] = None
discriminators: List[int] = field(default_factory=list)
@@ -1932,10 +1933,11 @@ def convert_args_to_matter_config(args: argparse.Namespace) -> MatterTestConfig:
config.paa_trust_store_path = args.paa_trust_store_path
config.ble_interface_id = args.ble_interface_id
config.pics = {} if args.PICS is None else read_pics_from_file(args.PICS)
config.tests = [] if args.tests is None else args.tests
config.tests = list(chain.from_iterable(args.tests or []))
config.timeout = args.timeout # This can be none, we pull the default from the test if it's unspecified
config.endpoint = args.endpoint # This can be None, the get_endpoint function allows the tests to supply a default
config.app_pid = 0 if args.app_pid is None else args.app_pid
config.fail_on_skipped_tests = args.fail_on_skipped

config.controller_node_id = args.controller_node_id
config.trace_to = args.trace_to
Expand All @@ -1962,13 +1964,10 @@ def parse_matter_test_args(argv: Optional[List[str]] = None) -> MatterTestConfig

basic_group = parser.add_argument_group(title="Basic arguments", description="Overall test execution arguments")

basic_group.add_argument('--tests',
'--test_case',
action="store",
nargs='+',
type=str,
metavar='test_a test_b...',
basic_group.add_argument('--tests', '--test-case', action='append', nargs='+', type=str, metavar='test_NAME',
help='A list of tests in the test class to execute.')
basic_group.add_argument('--fail-on-skipped', action="store_true", default=False,
help="Fail the test if any test cases are skipped")
basic_group.add_argument('--trace-to', nargs="*", default=[],
help="Where to trace (e.g perfetto, perfetto:path, json:log, json:path)")
basic_group.add_argument('--storage-path', action="store", type=pathlib.Path,
@@ -2056,17 +2055,17 @@ def parse_matter_test_args(argv: Optional[List[str]] = None) -> MatterTestConfig
help='Path to chip-tool credentials file root')

args_group = parser.add_argument_group(title="Config arguments", description="Test configuration global arguments set")
args_group.add_argument('--int-arg', nargs='*', action='append', type=int_named_arg, metavar="NAME:VALUE",
args_group.add_argument('--int-arg', nargs='+', action='append', type=int_named_arg, metavar="NAME:VALUE",
help="Add a named test argument for an integer as hex or decimal (e.g. -2 or 0xFFFF_1234)")
args_group.add_argument('--bool-arg', nargs='*', action='append', type=bool_named_arg, metavar="NAME:VALUE",
args_group.add_argument('--bool-arg', nargs='+', action='append', type=bool_named_arg, metavar="NAME:VALUE",
help="Add a named test argument for an boolean value (e.g. true/false or 0/1)")
args_group.add_argument('--float-arg', nargs='*', action='append', type=float_named_arg, metavar="NAME:VALUE",
args_group.add_argument('--float-arg', nargs='+', action='append', type=float_named_arg, metavar="NAME:VALUE",
help="Add a named test argument for a floating point value (e.g. -2.1 or 6.022e23)")
args_group.add_argument('--string-arg', nargs='*', action='append', type=str_named_arg, metavar="NAME:VALUE",
args_group.add_argument('--string-arg', nargs='+', action='append', type=str_named_arg, metavar="NAME:VALUE",
help="Add a named test argument for a string value")
args_group.add_argument('--json-arg', nargs='*', action='append', type=json_named_arg, metavar="NAME:VALUE",
args_group.add_argument('--json-arg', nargs='+', action='append', type=json_named_arg, metavar="NAME:VALUE",
help="Add a named test argument for JSON stored as a list or dict")
args_group.add_argument('--hex-arg', nargs='*', action='append', type=bytes_as_hex_named_arg, metavar="NAME:VALUE",
args_group.add_argument('--hex-arg', nargs='+', action='append', type=bytes_as_hex_named_arg, metavar="NAME:VALUE",
help="Add a named test argument for an octet string in hex (e.g. 0011cafe or 00:11:CA:FE)")

if not argv:
@@ -2510,6 +2509,8 @@ def run_tests_no_exit(test_class: MatterBaseTest, matter_test_config: MatterTest
try:
runner.run()
ok = runner.results.is_all_pass and ok
if matter_test_config.fail_on_skipped_tests and runner.results.skipped:
ok = False
except TimeoutError:
ok = False
except signals.TestAbortAll:
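The hunk above gates the overall result on skipped cases. A minimal sketch of that gate (assuming mobly's `TestResult`, whose `is_all_pass` property and `skipped` list the hunk references; this helper is illustrative, not the project's code):

```python
# Illustrative helper only: mirror the new fail-on-skipped gate on top of a
# mobly TestResult. is_all_pass does not treat skips as failures, so skipped
# cases have to be checked separately when the flag is set.
from mobly.records import TestResult


def run_is_ok(results: TestResult, fail_on_skipped_tests: bool) -> bool:
    ok = results.is_all_pass
    if fail_on_skipped_tests and results.skipped:
        ok = False
    return ok
```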
