Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for cargo test #39

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
47b475e
Add support for cargo test
luca-della-vedova Jul 9, 2024
7177b40
Switch to minidom
luca-della-vedova Jul 9, 2024
3bab48f
Remove standalone parameter
luca-della-vedova Jul 9, 2024
5565edd
Add cargo args parameter
luca-della-vedova Jul 10, 2024
77fdbdd
Remove color from test output
luca-della-vedova Jul 10, 2024
1d07204
Remove unnecessary assertion
luca-della-vedova Jul 10, 2024
a80c60f
Change build verb to also build tests
luca-della-vedova Jul 10, 2024
83a9c3a
Fix cargo args passing
luca-della-vedova Jul 10, 2024
50563f6
Change target dir to install base for tests
luca-della-vedova Jul 10, 2024
1389764
Split build and install folders / steps
luca-della-vedova Jul 10, 2024
bdbbf81
Add None check
luca-della-vedova Jul 10, 2024
1becab3
Change back to build base
luca-della-vedova Jul 10, 2024
1dda2c4
Add lockfile to cargo install
luca-della-vedova Jul 10, 2024
418612b
Simplify cargo paths
luca-della-vedova Jul 10, 2024
223f490
Reduce number of built targets
luca-della-vedova Jul 12, 2024
6a22fe5
Remove outdated comment
luca-della-vedova Jul 12, 2024
fe867be
Remove doctests
luca-della-vedova Jul 12, 2024
bf3637e
Add documentation
luca-della-vedova Jul 16, 2024
dc8d9c9
Consistency for test error handling
luca-della-vedova Jul 16, 2024
5a4067a
Fix README
luca-della-vedova Jul 16, 2024
e3e9ac8
Revert "Consistency for test error handling"
luca-della-vedova Jul 16, 2024
d2ea503
Remove NO_COLOR argument
luca-della-vedova Jul 22, 2024
b3ef3cd
Remove TODO
luca-della-vedova Aug 20, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,3 +74,27 @@ $ hello-world2
Hello, world!
```

### Testing

Test the packages with cargo:

```sh
$ colcon test
Starting >>> hello_world_2
Starting >>> hello_world
Finished <<< hello_world [0.24s]
Finished <<< hello_world_2 [0.25s]

Summary: 2 packages finished [0.39s]
```

Inspect the test results produced by `cargo test` and `cargo fmt --check`.
All of them should succeed for the empty templates:

```sh
$ colcon test-result --all
build/hello_world_2/cargo_test.xml: 2 tests, 0 errors, 0 failures, 0 skipped
build/hello_world/cargo_test.xml: 2 tests, 0 errors, 0 failures, 0 skipped

Summary: 4 tests, 0 errors, 0 failures, 0 skipped
```
27 changes: 25 additions & 2 deletions colcon_cargo/task/cargo/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,17 @@ async def build( # noqa: D102
if rc and rc.returncode:
return rc.returncode

cmd = self._install_cmd(cargo_args)

self.progress('install')

# colcon-ros-cargo overrides install command to return None
if cmd is not None:
rc = await run(
self.context, cmd, cwd=self.context.pkg.path, env=env)
if rc and rc.returncode:
return rc.returncode

if not skip_hook_creation:
create_environment_scripts(
self.context.pkg, args, additional_hooks=additional_hooks)
Expand All @@ -97,9 +108,21 @@ def _prepare(self, env, additional_hooks):
def _build_cmd(self, cargo_args):
    """Return the `cargo build` invocation for this package.

    Build artifacts are written to the package's build base; any
    user-supplied ``--cargo-args`` are appended last so they can
    override the defaults.
    """
    args = self.context.args
    return [
        CARGO_EXECUTABLE,
        'build',
        '--quiet',
        '--target-dir', args.build_base,
    ] + cargo_args

# Overridden by colcon-ros-cargo
def _install_cmd(self, cargo_args):
    """Return the `cargo install` invocation for this package.

    Installs the binaries of the crate in the current working
    directory (the package path the command runs from) into the
    install base, honoring the lockfile via ``--locked``.
    """
    args = self.context.args
    return [
        CARGO_EXECUTABLE,
        'install',
        '--force',
        '--quiet',
        '--locked',
        '--path', '.',
        '--root', args.install_base,
    ] + cargo_args
101 changes: 92 additions & 9 deletions colcon_cargo/task/cargo/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
# Licensed under the Apache License, Version 2.0

import os
from xml.dom import minidom
import xml.etree.ElementTree as eTree

from colcon_cargo.task.cargo import CARGO_EXECUTABLE
from colcon_core.event.test import TestFailure
Expand All @@ -22,38 +24,119 @@ def __init__(self): # noqa: D107
satisfies_version(TaskExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')

def add_arguments(self, *, parser):  # noqa: D102
    # `type=str.lstrip` removes the leading space users must add so an
    # argument starting with `-` is not parsed as a colcon option.
    parser.add_argument(
        '--cargo-args',
        nargs='*', metavar='*', type=str.lstrip,
        help='Pass arguments to Cargo projects. '
        'Arguments matching other options must be prefixed by a space,\n'
        'e.g. --cargo-args " --help"')

async def test(self, *, additional_hooks=None):
    """
    Run tests and style checks for the requested package.

    Results are compiled into a single result `cargo_test.xml` file
    with two test results, one for all the tests (cargo test) and one for
    style (`cargo fmt --check`).
    Documentation tests (`cargo test --doc`) are not implemented
    since it is not possible to distinguish between a test that failed
    because of a failing case and one that failed because the crate
    contains no library target.
    """
    pkg = self.context.pkg
    args = self.context.args

    logger.info(
        "Testing Cargo package in '{args.path}'".format_map(locals()))

    # The build step must have run first to create the build base.
    assert os.path.exists(args.build_base), \
        'Has this package been built before?'

    test_results_path = os.path.join(args.build_base, 'cargo_test.xml')

    try:
        env = await get_command_environment(
            'test', args.build_base, self.context.dependencies)
    except RuntimeError as e:
        # TODO(luca) log this as error in the test result file
        logger.error(str(e))
        return 1

    if CARGO_EXECUTABLE is None:
        raise RuntimeError("Could not find 'cargo' executable")

    cargo_args = args.cargo_args
    if cargo_args is None:
        cargo_args = []

    # Invoke cargo test; output is captured so it can be attached to
    # the failure entry of the result file.
    unit_rc = await run(
        self.context,
        self._test_cmd(cargo_args),
        cwd=args.path, env=env, capture_output=True)

    # Style check; user cargo args deliberately do not apply to rustfmt.
    fmt_rc = await run(
        self.context,
        self._fmt_cmd(),
        cwd=args.path, env=env, capture_output=True)

    # Combine both outcomes into a single pretty-printed JUnit-style
    # XML file that `colcon test-result` can consume.
    error_report = self._create_error_report(unit_rc, fmt_rc)
    with open(test_results_path, 'wb') as result_file:
        xmlstr = minidom.parseString(eTree.tostring(error_report))
        xmlstr = xmlstr.toprettyxml(indent=' ', encoding='utf-8')
        result_file.write(xmlstr)

    if unit_rc.returncode or fmt_rc.returncode:
        self.context.put_event_into_queue(TestFailure(pkg.name))
    # the return code should still be 0
    return 0

def _test_cmd(self, cargo_args):
    """Assemble the `cargo test` command line for this package."""
    build_base = self.context.args.build_base
    cmd = [CARGO_EXECUTABLE, 'test', '--quiet', '--target-dir', build_base]
    cmd.extend(cargo_args)
    # Everything after `--` is forwarded to the test harness itself.
    cmd.extend(['--', '--color=never'])
    return cmd

# Ignore cargo args for rustfmt
def _fmt_cmd(self):
    """Assemble the `cargo fmt --check` command line."""
    cmd = [CARGO_EXECUTABLE, 'fmt', '--check']
    # Everything after `--` is forwarded to rustfmt itself.
    cmd.extend(['--', '--color=never'])
    return cmd

def _create_error_report(self, unit_rc, fmt_rc) -> eTree.Element:
# TODO(luca) revisit when programmatic output from cargo test is
# stabilized, for now just have a suite for unit, and fmt tests
failures = 0
testsuites = eTree.Element('testsuites')
# TODO(luca) add time
testsuite = eTree.SubElement(testsuites,
'testsuite', {'name': 'cargo_test'})
unit_testcase = eTree.SubElement(testsuite, 'testcase',
{'name': 'unit'})
if unit_rc.returncode:
unit_failure = eTree.SubElement(unit_testcase, 'failure',
{'message': 'cargo test failed'})
unit_failure.text = unit_rc.stdout.decode('utf-8')
failures += 1
fmt_testcase = eTree.SubElement(testsuite, 'testcase', {'name': 'fmt'})
if fmt_rc.returncode:
fmt_failure = eTree.SubElement(fmt_testcase, 'failure',
{'message': 'cargo fmt failed'})
fmt_failure.text = fmt_rc.stdout.decode('utf-8')
failures += 1
testsuite.attrib['errors'] = str(0)
testsuite.attrib['failures'] = str(failures)
testsuite.attrib['skipped'] = str(0)
testsuite.attrib['tests'] = str(2)
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Perhaps I'm misunderstanding, but it seems like we're hard coding the report to always list 2 tests run?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes that is the case, all the output from cargo test is collated into a single result, and the result of cargo fmt --check is into another result. This is due to it not being possible right now to separate the result of cargo test by test case (unless we migrate to nextest or try to parse human readable output. It's part of the "what's next" in the PR description (Test granularity)

return testsuites
5 changes: 5 additions & 0 deletions test/rust-sample-package/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
// Test fixture: the doctest below is intentionally invalid Rust, so a
// `cargo test --doc` run would fail on it — presumably this verifies
// that the colcon test task does NOT run doctests (TODO confirm
// against the test task's behavior).
/// Failing doctest example
/// ```
/// invalid_syntax
/// ```
pub struct Type;
14 changes: 14 additions & 0 deletions test/rust-sample-package/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,17 @@
fn main() {
    println!("Hello, world!");
}

#[cfg(test)]
mod tests {

    // One deliberately passing and one deliberately failing test so the
    // reporting of both outcomes can be exercised by the test harness.
    #[test]
    fn ok() -> Result<(), ()> {
        Ok(())
    }

    #[test]
    fn err() -> Result<(), ()> {
        Err(())
    }
}
11 changes: 11 additions & 0 deletions test/spell_check.words
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,15 @@ asyncio
autouse
colcon
completers
deps
easymov
etree
getroot
iterdir
linter
lstrip
luca
minidom
monkeypatch
nargs
noqa
Expand All @@ -20,12 +24,19 @@ returncode
rglob
rmtree
rtype
rustfmt
scspell
setuptools
skipif
symlink
tempfile
testcase
testsuite
testsuites
thomas
tmpdir
todo
toml
toprettyxml
tostring
xmlstr
37 changes: 36 additions & 1 deletion test/test_build.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@
import shutil
import tempfile
from types import SimpleNamespace
import xml.etree.ElementTree as eTree

from colcon_cargo.package_identification.cargo import CargoPackageIdentification # noqa: E501
from colcon_cargo.task.cargo.build import CargoBuildTask
from colcon_cargo.task.cargo.test import CargoTestTask
from colcon_core.event_handler.console_direct import ConsoleDirectEventHandler
from colcon_core.package_descriptor import PackageDescriptor
from colcon_core.subprocess import new_event_loop
Expand Down Expand Up @@ -42,7 +44,7 @@ def test_package_identification():
@pytest.mark.skipif(
not shutil.which('cargo'),
reason='Rust must be installed to run this test')
def test_build_package():
def test_build_and_test_package():
event_loop = new_event_loop()
asyncio.set_event_loop(event_loop)

Expand Down Expand Up @@ -83,5 +85,38 @@ def test_build_package():
if os.name == 'nt':
app_name += '.exe'
assert (install_base / 'bin' / app_name).is_file()

# Now compile tests
task = CargoTestTask()
task.set_context(context=context)

# Expect tests to have failed but return code will still be 0
# since testing run succeeded
rc = event_loop.run_until_complete(task.test())
assert not rc
build_base = Path(task.context.args.build_base)

# Make sure the testing files are built
assert (build_base / 'debug' / 'deps').is_dir()
assert len(os.listdir(build_base / 'debug' / 'deps')) > 0
result_file_path = build_base / 'cargo_test.xml'
assert result_file_path.is_file()
check_result_file(result_file_path)

finally:
event_loop.close()


# Validate the generated result file: the 'unit' testcase must have a
# recorded failure while the 'fmt' testcase must have passed.
def check_result_file(path):
    testsuite = eTree.parse(path).getroot().find('testsuite')
    assert testsuite is not None
    unit_case = testsuite.find("testcase[@name='unit']")
    assert unit_case is not None
    assert unit_case.find('failure') is not None
    fmt_case = testsuite.find("testcase[@name='fmt']")
    assert fmt_case is not None
    assert fmt_case.find('failure') is None
Loading