From cfb979ed2f7f28e2927ad83996434f97dc643bcd Mon Sep 17 00:00:00 2001
From: Jonathan Bohren
Date: Mon, 14 Dec 2015 19:15:27 -0500
Subject: [PATCH 01/11] tests: Unifying integrated/system tests, adding
 additional test cases.

---
 tests/README.md                               |  15 +-
 .../resources/pkg_with_cmake_args/package.xml |  53 -----
 tests/integrated/test_build.py                | 121 ----------
 .../catkin_pkgs/cmake_args}/CMakeLists.txt    |   2 +-
 .../catkin_pkgs/cmake_args/package.xml        |   9 +
 .../catkin_pkgs/cmake_err/CMakeLists.txt      |   5 +
 .../catkin_pkgs/cmake_err/package.xml         |   9 +
 .../catkin_pkgs/cmake_warning/CMakeLists.txt  |   5 +
 .../catkin_pkgs/cmake_warning/package.xml     |   9 +
 .../catkin_pkgs/make_err/CMakeLists.txt       |   5 +
 .../resources/catkin_pkgs/make_err/fail.cpp   |   1 +
 .../catkin_pkgs/make_err/package.xml          |   9 +
 .../catkin_pkgs/make_warning/CMakeLists.txt   |   5 +
 .../catkin_pkgs/make_warning/package.xml      |   9 +
 .../catkin_pkgs/make_warning/warn.cpp         |   1 +
 .../catkin_pkgs/products_0/CMakeLists.txt     |  16 ++
 .../cmake/extras.cmake.develspace.in          |   1 +
 .../cmake/extras.cmake.installspace.in        |   1 +
 .../products_0/include/make_products_0/fun.h  |   3 +
 .../resources/catkin_pkgs/products_0/lib.cpp  |   4 +
 .../resources/catkin_pkgs/products_0/main.cpp |   6 +
 .../catkin_pkgs/products_0/package.xml        |   9 +
 .../catkin_pkgs/python_tests}/CMakeLists.txt  |   4 +-
 .../catkin_pkgs/python_tests}/package.xml     |   4 +-
 .../catkin_pkgs/python_tests}/setup.py        |   0
 .../catkin_pkgs/python_tests}/test_good.py    |   1 +
 .../python_tests_err}/CMakeLists.txt          |   2 +-
 .../catkin_pkgs/python_tests_err}/package.xml |   4 +-
 .../catkin_pkgs/python_tests_err}/setup.py    |   0
 .../catkin_pkgs/python_tests_err}/test_bad.py |   1 +
 .../cmake_pkgs/app_pkg/CMakeLists.txt         |  12 ++
 .../resources/cmake_pkgs/app_pkg/package.xml  |  11 +
 .../resources/cmake_pkgs/app_pkg/vanilla.cpp  |   8 +
 .../cmake_pkgs/cmake_pkg/CMakeLists.txt       |  10 +
 .../cmake_pkgs/cmake_pkg/package.xml          |  10 +
 .../cmake_pkgs/cmake_pkg/vanilla.cpp          |   6 +
 .../cmake_pkgs/lib_pkg/CMakeLists.txt         |  15 ++
 .../resources/cmake_pkgs/lib_pkg/package.xml  |  10 +
 .../resources/cmake_pkgs/lib_pkg/vanilla.cpp  |   6 +
 .../resources/cmake_pkgs/lib_pkg/vanilla.h    |   2 +
 .../ros_pkgs/pkg_with_roslint/CMakeLists.txt  |   7 +
 .../ros_pkgs/pkg_with_roslint/main.cpp        |   4 +
 .../ros_pkgs/pkg_with_roslint/package.xml     |  10 +
 tests/system/verbs/catkin_build/test_args.py  |  99 +++++++++
 tests/system/verbs/catkin_build/test_build.py | 203 ++++++++++++++++++
 .../system/verbs/catkin_build/test_bwlists.py |  42 ++++
 .../system/verbs/catkin_build/test_context.py |  37 ++++
 .../system/verbs/catkin_build/test_eclipse.py |  38 ++++
 .../verbs/catkin_build/test_modify_ws.py      |  52 +++++
 .../verbs/catkin_build/test_unit_tests.py     |  55 +++++
 .../catkin_build/test_whitespace_in_paths.py  |  28 +--
 .../verbs/catkin_config}/__init__.py          |   0
 .../verbs/catkin_config}/test_config.py       |  10 +-
 tests/system/verbs/catkin_init/__init__.py    |   0
 .../verbs/catkin_init}/test_init.py           |  10 +-
 tests/system/workspace_factory.py             | 114 ++++++----
 tests/unit/test_runner.py                     |  32 ---
 tests/utils.py                                |  38 +++-
 58 files changed, 897 insertions(+), 286 deletions(-)
 delete mode 100644 tests/integrated/resources/pkg_with_cmake_args/package.xml
 delete mode 100644 tests/integrated/test_build.py
 rename tests/{integrated/resources/pkg_with_cmake_args => system/resources/catkin_pkgs/cmake_args}/CMakeLists.txt (90%)
 create mode 100644 tests/system/resources/catkin_pkgs/cmake_args/package.xml
 create mode 100644 tests/system/resources/catkin_pkgs/cmake_err/CMakeLists.txt
 create mode 100644 tests/system/resources/catkin_pkgs/cmake_err/package.xml
 create mode 100644 tests/system/resources/catkin_pkgs/cmake_warning/CMakeLists.txt
 create mode 100644 tests/system/resources/catkin_pkgs/cmake_warning/package.xml
 create mode 100644 tests/system/resources/catkin_pkgs/make_err/CMakeLists.txt
 create mode 100644 tests/system/resources/catkin_pkgs/make_err/fail.cpp
 create mode 100644 tests/system/resources/catkin_pkgs/make_err/package.xml
 create mode 100644 tests/system/resources/catkin_pkgs/make_warning/CMakeLists.txt
 create mode 100644 tests/system/resources/catkin_pkgs/make_warning/package.xml
 create mode 100644 tests/system/resources/catkin_pkgs/make_warning/warn.cpp
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/CMakeLists.txt
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.develspace.in
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.installspace.in
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/include/make_products_0/fun.h
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/lib.cpp
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/main.cpp
 create mode 100644 tests/system/resources/catkin_pkgs/products_0/package.xml
 rename tests/{integrated/resources/pkg_with_test => system/resources/catkin_pkgs/python_tests}/CMakeLists.txt (72%)
 rename tests/{integrated/resources/pkg_with_broken_test => system/resources/catkin_pkgs/python_tests}/package.xml (69%)
 rename tests/{integrated/resources/pkg_with_broken_test => system/resources/catkin_pkgs/python_tests}/setup.py (100%)
 rename tests/{integrated/resources/pkg_with_test => system/resources/catkin_pkgs/python_tests}/test_good.py (99%)
 rename tests/{integrated/resources/pkg_with_broken_test => system/resources/catkin_pkgs/python_tests_err}/CMakeLists.txt (84%)
 rename tests/{integrated/resources/pkg_with_test => system/resources/catkin_pkgs/python_tests_err}/package.xml (63%)
 rename tests/{integrated/resources/pkg_with_test => system/resources/catkin_pkgs/python_tests_err}/setup.py (100%)
 rename tests/{integrated/resources/pkg_with_broken_test => system/resources/catkin_pkgs/python_tests_err}/test_bad.py (99%)
 create mode 100644 tests/system/resources/cmake_pkgs/app_pkg/CMakeLists.txt
 create mode 100644 tests/system/resources/cmake_pkgs/app_pkg/package.xml
 create mode 100644 tests/system/resources/cmake_pkgs/app_pkg/vanilla.cpp
 create mode 100644 tests/system/resources/cmake_pkgs/cmake_pkg/CMakeLists.txt
 create mode 100644 tests/system/resources/cmake_pkgs/cmake_pkg/package.xml
 create mode 100644 tests/system/resources/cmake_pkgs/cmake_pkg/vanilla.cpp
 create mode 100644 tests/system/resources/cmake_pkgs/lib_pkg/CMakeLists.txt
 create mode 100644 tests/system/resources/cmake_pkgs/lib_pkg/package.xml
 create mode 100644 tests/system/resources/cmake_pkgs/lib_pkg/vanilla.cpp
 create mode 100644 tests/system/resources/cmake_pkgs/lib_pkg/vanilla.h
 create mode 100644 tests/system/resources/ros_pkgs/pkg_with_roslint/CMakeLists.txt
 create mode 100644 tests/system/resources/ros_pkgs/pkg_with_roslint/main.cpp
 create mode 100644 tests/system/resources/ros_pkgs/pkg_with_roslint/package.xml
 create mode 100644 tests/system/verbs/catkin_build/test_args.py
 create mode 100644 tests/system/verbs/catkin_build/test_build.py
 create mode 100644 tests/system/verbs/catkin_build/test_bwlists.py
 create mode 100644 tests/system/verbs/catkin_build/test_context.py
 create mode 100644 tests/system/verbs/catkin_build/test_eclipse.py
 create mode 100644 tests/system/verbs/catkin_build/test_modify_ws.py
 create mode 100644 tests/system/verbs/catkin_build/test_unit_tests.py
 rename tests/{integrated => system/verbs/catkin_config}/__init__.py (100%)
 rename tests/{integrated => system/verbs/catkin_config}/test_config.py (71%)
 create mode 100644 tests/system/verbs/catkin_init/__init__.py
 rename tests/{integrated => system/verbs/catkin_init}/test_init.py (63%)
 delete mode 100644 tests/unit/test_runner.py

diff --git a/tests/README.md b/tests/README.md
index b9933001..190f20f4 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -1,10 +1,17 @@
 Testing
 =======
 
-The `catkin_tools` test harness includes the following
-types of tests, organized into different directories:
+The `catkin_tools` test harness includes the following types of tests,
+organized into different directories:
 
 * **unit** -- API tests for the `catkin_tools` python interface
-* **integrated** -- Full integration tests for different workflows
-* **system** -- Tests which not only test integrated parts of catkin_tools but the interaction with other, external projects like catkin_pkg and catkin.
+* **system** -- Tests which not only test integrated parts of `catkin_tools`,
+  but also their interaction with other external projects like catkin_pkg and catkin.
+
+## Running Tests
+
+To run all tests and view the output, run the following in this directory:
+
+```
+nosetests -s
+```
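For example, running `nosetests -s system/verbs/catkin_build/` from this directory would exercise only the `catkin build` system tests; the path is assumed from the layout this patch introduces.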
diff --git a/tests/integrated/resources/pkg_with_cmake_args/package.xml b/tests/integrated/resources/pkg_with_cmake_args/package.xml
deleted file mode 100644
index f0f4982d..00000000
--- a/tests/integrated/resources/pkg_with_cmake_args/package.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<package>
-  <name>pkg_with_cmake_args</name>
-  <version>0.0.0</version>
-  <description>The pkg_with_cmake_args package</description>
-
-
-
-
-  <maintainer email="jbohren@todo.todo">jbohren</maintainer>
-
-
-
-
-
-  <license>TODO</license>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-
-
-
-
-
-
-
-
-</package>
\ No newline at end of file
diff --git a/tests/integrated/test_build.py b/tests/integrated/test_build.py
deleted file mode 100644
index e51a455e..00000000
--- a/tests/integrated/test_build.py
+++ /dev/null
@@ -1,121 +0,0 @@
-
-from __future__ import print_function
-
-import os
-import shutil
-
-from ..utils import in_temporary_directory
-from ..utils import assert_cmd_success, assert_cmd_failure
-from ..utils import assert_files_exist
-
-from ..workspace_assertions import assert_workspace_initialized
-from ..workspace_assertions import assert_no_warnings
-
-TEST_DIR = os.path.dirname(__file__)
-RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources')
-
-
-@in_temporary_directory
-def test_build_no_src():
-    assert_cmd_failure(['catkin', 'build'])
-
-
-@in_temporary_directory
-def test_build_auto_init_no_pkgs():
-    cwd = os.getcwd()
-    source_space = os.path.join(cwd, 'src')
-    print("Creating source directory: %s" % source_space)
-    os.mkdir(source_space)
-    out = assert_cmd_failure(['catkin', 'build', '--no-notify'])
-    assert_no_warnings(out)
-    assert_workspace_initialized('.')
-
-
-@in_temporary_directory
-def test_build_auto_init_one_pkg():
-    cwd = os.getcwd()
-    source_space = os.path.join(cwd, 'src')
-    print("Creating source directory: %s" % source_space)
-    os.mkdir(source_space)
-    assert_cmd_success(['catkin', 'create', 'pkg', '--rosdistro', 'hydro',
-                        '-p', source_space, 'pkg_a'])
-    out = assert_cmd_success(['catkin', 'build', '--no-notify', '--no-status',
-                              '--verbose'])
-    assert_no_warnings(out)
-    assert_workspace_initialized('.')
-
-
-@in_temporary_directory
-def test_build_eclipse():
-    cwd = os.getcwd()
-    source_space = os.path.join(cwd, 'src')
-    print("Creating source directory: %s" % source_space)
-    os.mkdir(source_space)
-    assert_cmd_success(['catkin', 'create', 'pkg', '--rosdistro', 'hydro',
-                        '-p', source_space, 'pkg_a'])
-    out = assert_cmd_success(['catkin', 'build', '--no-notify',
-                              '--no-status', '--verbose', '--cmake-args',
-                              '-GEclipse CDT4 - Unix Makefiles'])
-    assert_no_warnings(out)
-    assert_workspace_initialized('.')
-    assert_files_exist(os.path.join(cwd, 'build', 'pkg_a'),
-                       ['.project', '.cproject'])
-
-
-@in_temporary_directory
-def test_build_pkg_unit_tests():
-    cwd = os.getcwd()
-    source_space = os.path.join(cwd, 'src')
-    print("Creating source directory: %s" % source_space)
-    shutil.copytree(RESOURCES_DIR, source_space)
-    assert_cmd_success(['catkin', 'build', '--no-notify', '--no-status',
-                        '--verbose', '--no-deps', 'pkg_with_test',
-                        '--make-args', 'run_tests'])
-    assert_cmd_success(['catkin_test_results', 'build/pkg_with_test'])
-    assert_cmd_success(['catkin', 'build', '--no-notify', '--no-status',
-                        '--verbose', '--no-deps', 'pkg_with_broken_test',
-                        '--make-args', 'run_tests'])
-    assert_cmd_failure(['catkin_test_results', 'build/pkg_with_broken_test'])
-
-
-@in_temporary_directory
-def test_build_pkg_unit_tests_alias():
-    cwd = os.getcwd()
-    source_space = os.path.join(cwd, 'src')
-    print("Creating source directory: %s" % source_space)
-    shutil.copytree(RESOURCES_DIR, source_space)
-
-    assert_cmd_success(['catkin', 'run_tests', 'pkg_with_test', '--no-deps',
-                        '--no-notify', '--no-status'])
-    assert_cmd_success(['catkin_test_results', 'build/pkg_with_test'])
-
-    assert_cmd_success(['catkin', 'run_tests', 'pkg_with_broken_test',
-                        '--no-deps', '--no-notify', '--no-status'])
-    assert_cmd_failure(['catkin_test_results', 'build/pkg_with_broken_test'])
-
-@in_temporary_directory
-def test_build_pkg_cmake_args():
-    cwd = os.getcwd()
-    source_space = os.path.join(cwd, 'src')
-    print("Creating source directory: %s" % source_space)
-    shutil.copytree(RESOURCES_DIR, source_space)
-
-    assert_cmd_failure(['catkin', 'build', 'pkg_with_cmake_args', '--no-deps',
-                        '--no-notify', '--no-status', '--force-cmake',
-                        '--cmake-args',
-                        '-DVAR1=VAL1'])
-
-    assert_cmd_failure(['catkin', 'build', 'pkg_with_cmake_args', '--no-deps',
-                        '--no-notify', '--no-status', '--force-cmake',
-                        '--cmake-args',
-                        '-DVAR1=VAL1', '-DVAR2=VAL2'])
-
-    assert_cmd_success(['catkin', 'build', 'pkg_with_cmake_args', '--no-deps',
-                        '--no-notify', '--no-status', '--force-cmake',
-                        '--cmake-args',
-                        '-DVAR1=VAL1', '-DVAR2=VAL2', '-DVAR3=VAL3'])
-
-    assert_cmd_success(['catkin', 'build', 'pkg_with_cmake_args', '--no-deps',
-                        '--no-notify', '--no-status', '--force-cmake',
-                        '--cmake-args',
-                        '-DVAR1=VAL1', '-DVAR2=VAL2', '-DVAR3=VAL3', '--'])
diff --git a/tests/integrated/resources/pkg_with_cmake_args/CMakeLists.txt b/tests/system/resources/catkin_pkgs/cmake_args/CMakeLists.txt
similarity index 90%
rename from tests/integrated/resources/pkg_with_cmake_args/CMakeLists.txt
rename to tests/system/resources/catkin_pkgs/cmake_args/CMakeLists.txt
index 00c805cf..e6212ddc 100644
--- a/tests/integrated/resources/pkg_with_cmake_args/CMakeLists.txt
+++ b/tests/system/resources/catkin_pkgs/cmake_args/CMakeLists.txt
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 2.8.3)
-project(pkg_with_cmake_args)
+project(cmake_args)
 
 find_package(catkin REQUIRED)
 
diff --git a/tests/system/resources/catkin_pkgs/cmake_args/package.xml b/tests/system/resources/catkin_pkgs/cmake_args/package.xml
new file mode 100644
index 00000000..b62e0f7d
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/cmake_args/package.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<package>
+  <name>cmake_args</name>
+  <version>0.1.0</version>
+  <description>This package fails unless three CMake arguments are set.</description>
+  <maintainer email="todo@todo.todo">todo</maintainer>
+  <license>BSD</license>
+  <buildtool_depend>catkin</buildtool_depend>
+</package>
diff --git a/tests/system/resources/catkin_pkgs/cmake_err/CMakeLists.txt b/tests/system/resources/catkin_pkgs/cmake_err/CMakeLists.txt
new file mode 100644
index 00000000..90962782
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/cmake_err/CMakeLists.txt
@@ -0,0 +1,5 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(cmake_err)
+find_package(catkin REQUIRED)
+catkin_package()
+message(SEND_ERROR "This package sends an error from cmake.")
diff --git a/tests/system/resources/catkin_pkgs/cmake_err/package.xml b/tests/system/resources/catkin_pkgs/cmake_err/package.xml
new file mode 100644
index 00000000..9dfc69ab
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/cmake_err/package.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<package>
+  <name>cmake_err</name>
+  <version>0.1.0</version>
+  <description>This package produces a CMake error when configuring.</description>
+  <maintainer email="todo@todo.todo">todo</maintainer>
+  <license>BSD</license>
+  <buildtool_depend>catkin</buildtool_depend>
+</package>
diff --git a/tests/system/resources/catkin_pkgs/cmake_warning/CMakeLists.txt b/tests/system/resources/catkin_pkgs/cmake_warning/CMakeLists.txt
new file mode 100644
index 00000000..9a49c500
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/cmake_warning/CMakeLists.txt
@@ -0,0 +1,5 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(cmake_warning)
+find_package(catkin REQUIRED)
+catkin_package()
+message(WARNING "This sends a warning from cmake.")
diff --git a/tests/system/resources/catkin_pkgs/cmake_warning/package.xml b/tests/system/resources/catkin_pkgs/cmake_warning/package.xml
new file mode 100644
index 00000000..082c7012
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/cmake_warning/package.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<package>
+  <name>cmake_warning</name>
+  <version>0.1.0</version>
+  <description>This package produces a warning when configuring CMake.</description>
+  <maintainer email="todo@todo.todo">todo</maintainer>
+  <license>TODO</license>
+  <buildtool_depend>catkin</buildtool_depend>
+</package>
diff --git a/tests/system/resources/catkin_pkgs/make_err/CMakeLists.txt b/tests/system/resources/catkin_pkgs/make_err/CMakeLists.txt
new file mode 100644
index 00000000..9bbb128c
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/make_err/CMakeLists.txt
@@ -0,0 +1,5 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(make_err)
+find_package(catkin REQUIRED)
+catkin_package()
+add_executable(fail fail.cpp)
diff --git a/tests/system/resources/catkin_pkgs/make_err/fail.cpp b/tests/system/resources/catkin_pkgs/make_err/fail.cpp
new file mode 100644
index 00000000..94fbbc02
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/make_err/fail.cpp
@@ -0,0 +1 @@
+#error This sends an error from make
diff --git a/tests/system/resources/catkin_pkgs/make_err/package.xml b/tests/system/resources/catkin_pkgs/make_err/package.xml
new file mode 100644
index 00000000..337a2572
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/make_err/package.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<package>
+  <name>make_err</name>
+  <version>0.0.0</version>
+  <description>This package produces a make error when building.</description>
+  <maintainer email="todo@todo.todo">todo</maintainer>
+  <license>BSD</license>
+  <buildtool_depend>catkin</buildtool_depend>
+</package>
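The `cmake_err`, `cmake_warning`, and `make_err` fixtures above exist to drive the failure-path system tests later in this patch. As an illustration only — `RESOURCES_DIR`, `BUILD`, `workspace_factory`, and `catkin_failure` are the helpers those test modules define — a failing-build test over the `make_err` fixture could be sketched like this:

```
import os
import shutil


def test_make_err():
    """The make_err fixture should make `catkin build` fail."""
    with workspace_factory() as wf:
        # Copy the single fixture package in as the whole source space,
        # mirroring the shutil.copytree pattern used by test_args.py below.
        shutil.copytree(
            os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'make_err'), 'src')
        assert catkin_failure(BUILD + ['make_err', '--no-deps'])
```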
diff --git a/tests/system/resources/catkin_pkgs/make_warning/CMakeLists.txt b/tests/system/resources/catkin_pkgs/make_warning/CMakeLists.txt
new file mode 100644
index 00000000..5bd83417
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/make_warning/CMakeLists.txt
@@ -0,0 +1,5 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(make_warning)
+find_package(catkin REQUIRED)
+catkin_package()
+add_library(warn warn.cpp)
diff --git a/tests/system/resources/catkin_pkgs/make_warning/package.xml b/tests/system/resources/catkin_pkgs/make_warning/package.xml
new file mode 100644
index 00000000..855a55e3
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/make_warning/package.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<package>
+  <name>make_warning</name>
+  <version>0.1.0</version>
+  <description>This package generates a warning in the make stage.</description>
+  <maintainer email="todo@todo.todo">todo</maintainer>
+  <license>BSD</license>
+  <buildtool_depend>catkin</buildtool_depend>
+</package>
diff --git a/tests/system/resources/catkin_pkgs/make_warning/warn.cpp b/tests/system/resources/catkin_pkgs/make_warning/warn.cpp
new file mode 100644
index 00000000..fbdecd24
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/make_warning/warn.cpp
@@ -0,0 +1 @@
+#warning This sends a warning from make
diff --git a/tests/system/resources/catkin_pkgs/products_0/CMakeLists.txt b/tests/system/resources/catkin_pkgs/products_0/CMakeLists.txt
new file mode 100644
index 00000000..4d86b048
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/CMakeLists.txt
@@ -0,0 +1,16 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(products_0)
+find_package(catkin REQUIRED)
+
+catkin_package(
+  INCLUDE_DIRS include
+  LIBRARIES ${PROJECT_NAME}_lib
+  CFG_EXTRAS extras.cmake)
+
+include_directories(include)
+
+add_library(${PROJECT_NAME}_lib lib.cpp)
+target_link_libraries(${PROJECT_NAME}_lib ${catkin_LIBRARIES})
+
+add_executable(main main.cpp)
+target_link_libraries(main ${PROJECT_NAME}_lib ${catkin_LIBRARIES})
diff --git a/tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.develspace.in b/tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.develspace.in
new file mode 100644
index 00000000..6ef92731
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.develspace.in
@@ -0,0 +1 @@
+set(ENV{PRODUCTS_0} "devel")
diff --git a/tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.installspace.in b/tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.installspace.in
new file mode 100644
index 00000000..6bf98810
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/cmake/extras.cmake.installspace.in
@@ -0,0 +1 @@
+set(ENV{PRODUCTS_0} "install")
diff --git a/tests/system/resources/catkin_pkgs/products_0/include/make_products_0/fun.h b/tests/system/resources/catkin_pkgs/products_0/include/make_products_0/fun.h
new file mode 100644
index 00000000..dd5dcb94
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/include/make_products_0/fun.h
@@ -0,0 +1,3 @@
+namespace make_products_0 {
+  int fun();
+}
diff --git a/tests/system/resources/catkin_pkgs/products_0/lib.cpp b/tests/system/resources/catkin_pkgs/products_0/lib.cpp
new file mode 100644
index 00000000..6911b46c
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/lib.cpp
@@ -0,0 +1,4 @@
+#include <make_products_0/fun.h>
+int make_products_0::fun() {
+  return 0;
+}
diff --git a/tests/system/resources/catkin_pkgs/products_0/main.cpp b/tests/system/resources/catkin_pkgs/products_0/main.cpp
new file mode 100644
index 00000000..e9345a36
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/main.cpp
@@ -0,0 +1,6 @@
+
+#include <make_products_0/fun.h>
+
+int main(int argc, char** argv) {
+  return make_products_0::fun();
+}
diff --git a/tests/system/resources/catkin_pkgs/products_0/package.xml b/tests/system/resources/catkin_pkgs/products_0/package.xml
new file mode 100644
index 00000000..a1e45de5
--- /dev/null
+++ b/tests/system/resources/catkin_pkgs/products_0/package.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<package>
+  <name>products_0</name>
+  <version>0.1.0</version>
+  <description>This package generates products in the make stage.</description>
+  <maintainer email="todo@todo.todo">todo</maintainer>
+  <license>BSD</license>
+  <buildtool_depend>catkin</buildtool_depend>
+</package>
diff --git a/tests/integrated/resources/pkg_with_test/CMakeLists.txt b/tests/system/resources/catkin_pkgs/python_tests/CMakeLists.txt
similarity index 72%
rename from tests/integrated/resources/pkg_with_test/CMakeLists.txt
rename to tests/system/resources/catkin_pkgs/python_tests/CMakeLists.txt
index c9e57bc1..c884bab3 100644
--- a/tests/integrated/resources/pkg_with_test/CMakeLists.txt
+++ b/tests/system/resources/catkin_pkgs/python_tests/CMakeLists.txt
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 2.8.3)
-project(pkg_with_test)
+project(python_tests)
 
 find_package(catkin REQUIRED)
 
 catkin_package()
 
-if (CATKIN_ENABLE_TESTING)
+if(CATKIN_ENABLE_TESTING)
   catkin_add_nosetests(test_good.py)
 endif()
diff --git a/tests/integrated/resources/pkg_with_broken_test/package.xml b/tests/system/resources/catkin_pkgs/python_tests/package.xml
similarity index 69%
rename from tests/integrated/resources/pkg_with_broken_test/package.xml
rename to tests/system/resources/catkin_pkgs/python_tests/package.xml
index b08b1e38..144a3280 100644
--- a/tests/integrated/resources/pkg_with_broken_test/package.xml
+++ b/tests/system/resources/catkin_pkgs/python_tests/package.xml
@@ -1,9 +1,9 @@
 <package>
-  <name>pkg_with_broken_test</name>
+  <name>python_tests</name>
   <version>0.1.0</version>
   <license>BSD</license>
   <maintainer email="todo@todo.todo">todo</maintainer>
-  <description>Nothing to see here</description>
+  <description>This package contains a python test.</description>
   <buildtool_depend>catkin</buildtool_depend>
   <test_depend>unittest</test_depend>
 </package>
diff --git a/tests/integrated/resources/pkg_with_broken_test/setup.py b/tests/system/resources/catkin_pkgs/python_tests/setup.py
similarity index 100%
rename from tests/integrated/resources/pkg_with_broken_test/setup.py
rename to tests/system/resources/catkin_pkgs/python_tests/setup.py
diff --git a/tests/integrated/resources/pkg_with_test/test_good.py b/tests/system/resources/catkin_pkgs/python_tests/test_good.py
similarity index 99%
rename from tests/integrated/resources/pkg_with_test/test_good.py
rename to tests/system/resources/catkin_pkgs/python_tests/test_good.py
index a604a74f..0ae625a2 100644
--- a/tests/integrated/resources/pkg_with_test/test_good.py
+++ b/tests/system/resources/catkin_pkgs/python_tests/test_good.py
@@ -4,6 +4,7 @@
 
 
 class TestGood(unittest.TestCase):
+
     def test_zero(self):
         self.assertEqual(0, 0)
 
diff --git a/tests/integrated/resources/pkg_with_broken_test/CMakeLists.txt b/tests/system/resources/catkin_pkgs/python_tests_err/CMakeLists.txt
similarity index 84%
rename from tests/integrated/resources/pkg_with_broken_test/CMakeLists.txt
rename to tests/system/resources/catkin_pkgs/python_tests_err/CMakeLists.txt
index 71b985a2..4eac7224 100644
--- a/tests/integrated/resources/pkg_with_broken_test/CMakeLists.txt
+++ b/tests/system/resources/catkin_pkgs/python_tests_err/CMakeLists.txt
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 2.8.3)
-project(pkg_with_broken_test)
+project(python_tests_err)
 
 find_package(catkin REQUIRED)
 
 catkin_package()
diff --git a/tests/integrated/resources/pkg_with_test/package.xml b/tests/system/resources/catkin_pkgs/python_tests_err/package.xml
similarity index 63%
rename from tests/integrated/resources/pkg_with_test/package.xml
rename to tests/system/resources/catkin_pkgs/python_tests_err/package.xml
index 3b7791bb..aee9ed21 100644
--- a/tests/integrated/resources/pkg_with_test/package.xml
+++ b/tests/system/resources/catkin_pkgs/python_tests_err/package.xml
@@ -1,9 +1,9 @@
 <package>
-  <name>pkg_with_test</name>
+  <name>python_tests_err</name>
   <version>0.1.0</version>
   <license>BSD</license>
   <maintainer email="todo@todo.todo">todo</maintainer>
-  <description>Nothing to see here</description>
+  <description>This package contains a unit test that should fail when run.</description>
   <buildtool_depend>catkin</buildtool_depend>
   <test_depend>unittest</test_depend>
 </package>
diff --git a/tests/integrated/resources/pkg_with_test/setup.py b/tests/system/resources/catkin_pkgs/python_tests_err/setup.py
similarity index 100%
rename from tests/integrated/resources/pkg_with_test/setup.py
rename to tests/system/resources/catkin_pkgs/python_tests_err/setup.py
diff --git a/tests/integrated/resources/pkg_with_broken_test/test_bad.py b/tests/system/resources/catkin_pkgs/python_tests_err/test_bad.py
similarity index 99%
rename from tests/integrated/resources/pkg_with_broken_test/test_bad.py
rename to tests/system/resources/catkin_pkgs/python_tests_err/test_bad.py
index 2b99a5fc..f46637e8 100644
--- a/tests/integrated/resources/pkg_with_broken_test/test_bad.py
+++ b/tests/system/resources/catkin_pkgs/python_tests_err/test_bad.py
@@ -4,6 +4,7 @@
 
 
 class TestBad(unittest.TestCase):
+
     def test_zero(self):
         self.assertEqual(0, 1)
 
diff --git a/tests/system/resources/cmake_pkgs/app_pkg/CMakeLists.txt b/tests/system/resources/cmake_pkgs/app_pkg/CMakeLists.txt
new file mode 100644
index 00000000..e7a0a618
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/app_pkg/CMakeLists.txt
@@ -0,0 +1,12 @@
+cmake_minimum_required (VERSION 2.6)
+project(app_pkg)
+
+add_executable(vanilla_app vanilla.cpp)
+find_library(LIBVANILLA vanilla)
+target_link_libraries(vanilla_app ${LIBVANILLA})
+
+install(TARGETS vanilla_app
+  RUNTIME DESTINATION bin
+  LIBRARY DESTINATION lib
+  ARCHIVE DESTINATION lib/static)
+
diff --git a/tests/system/resources/cmake_pkgs/app_pkg/package.xml b/tests/system/resources/cmake_pkgs/app_pkg/package.xml
new file mode 100644
index 00000000..3f2d895e
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/app_pkg/package.xml
@@ -0,0 +1,11 @@
+<package>
+  <name>app_pkg</name>
+  <description>vanilla CMake</description>
+  <version>0.1.0</version>
+  <license>BSD</license>
+  <maintainer email="jbo@todo.todo">jbo</maintainer>
+  <build_depend>lib_pkg</build_depend>
+  <export>
+    <build_type>cmake</build_type>
+  </export>
+</package>
diff --git a/tests/system/resources/cmake_pkgs/app_pkg/vanilla.cpp b/tests/system/resources/cmake_pkgs/app_pkg/vanilla.cpp
new file mode 100644
index 00000000..a642a494
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/app_pkg/vanilla.cpp
@@ -0,0 +1,8 @@
+#include <cstdlib>
+
+#include <vanilla.h>
+
+int main(int argc, char **argv) {
+  vanilla();
+  return 0;
+}
diff --git a/tests/system/resources/cmake_pkgs/cmake_pkg/CMakeLists.txt b/tests/system/resources/cmake_pkgs/cmake_pkg/CMakeLists.txt
new file mode 100644
index 00000000..a2582066
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/cmake_pkg/CMakeLists.txt
@@ -0,0 +1,10 @@
+cmake_minimum_required (VERSION 2.6)
+project(cmake_pkg)
+
+add_executable(vanilla vanilla.cpp)
+
+install(TARGETS vanilla
+  RUNTIME DESTINATION bin
+  LIBRARY DESTINATION lib
+  ARCHIVE DESTINATION lib/static)
+
diff --git a/tests/system/resources/cmake_pkgs/cmake_pkg/package.xml b/tests/system/resources/cmake_pkgs/cmake_pkg/package.xml
new file mode 100644
index 00000000..059ea87d
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/cmake_pkg/package.xml
@@ -0,0 +1,10 @@
+<package>
+  <name>cmake_pkg</name>
+  <description>vanilla CMake</description>
+  <version>0.1.0</version>
+  <license>BSD</license>
+  <maintainer email="jbo@todo.todo">jbo</maintainer>
+  <export>
+    <build_type>cmake</build_type>
+  </export>
+</package>
diff --git a/tests/system/resources/cmake_pkgs/cmake_pkg/vanilla.cpp b/tests/system/resources/cmake_pkgs/cmake_pkg/vanilla.cpp
new file mode 100644
index 00000000..a2011dbd
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/cmake_pkg/vanilla.cpp
@@ -0,0 +1,6 @@
+#include <cstdlib>
+
+int main(int argc, char **argv) {
+
+  return 0;
+}
diff --git a/tests/system/resources/cmake_pkgs/lib_pkg/CMakeLists.txt b/tests/system/resources/cmake_pkgs/lib_pkg/CMakeLists.txt
new file mode 100644
index 00000000..51a8ede7
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/lib_pkg/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required (VERSION 2.6)
+project(lib_pkg)
+
+include_directories(${CMAKE_SOURCE_DIR})
+add_library(vanilla SHARED vanilla.cpp)
+
+INSTALL(FILES vanilla.h
+  DESTINATION include
+  )
+
+install(TARGETS vanilla
+  RUNTIME DESTINATION bin
+  LIBRARY DESTINATION lib
+  ARCHIVE DESTINATION lib)
+
diff --git a/tests/system/resources/cmake_pkgs/lib_pkg/package.xml b/tests/system/resources/cmake_pkgs/lib_pkg/package.xml
new file mode 100644
index 00000000..88ffb373
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/lib_pkg/package.xml
@@ -0,0 +1,10 @@
+<package>
+  <name>lib_pkg</name>
+  <description>vanilla CMake</description>
+  <version>0.1.0</version>
+  <license>BSD</license>
+  <maintainer email="jbo@todo.todo">jbo</maintainer>
+  <export>
+    <build_type>cmake</build_type>
+  </export>
+</package>
diff --git a/tests/system/resources/cmake_pkgs/lib_pkg/vanilla.cpp b/tests/system/resources/cmake_pkgs/lib_pkg/vanilla.cpp
new file mode 100644
index 00000000..62b9b17b
--- /dev/null
+++ b/tests/system/resources/cmake_pkgs/lib_pkg/vanilla.cpp
@@ -0,0 +1,6 @@
+#include <iostream>
+#include <vanilla.h>
+
+void vanilla() {
+  std::cerr << "vanilla." << std::endl;
+}
diff --git a/tests/system/resources/ros_pkgs/pkg_with_roslint/package.xml b/tests/system/resources/ros_pkgs/pkg_with_roslint/package.xml
new file mode 100644
@@ -0,0 +1,10 @@
+<?xml version="1.0"?>
+<package>
+  <name>pkg_with_roslint</name>
+  <version>0.0.0</version>
+  <description>The catkin_pkg_roslint package</description>
+  <maintainer email="jbohren@todo.todo">jbohren</maintainer>
+  <license>TODO</license>
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>roslint</build_depend>
+</package>
diff --git a/tests/system/verbs/catkin_build/test_args.py b/tests/system/verbs/catkin_build/test_args.py
new file mode 100644
index 00000000..59eade05
--- /dev/null
+++ b/tests/system/verbs/catkin_build/test_args.py
@@ -0,0 +1,99 @@
+
+from __future__ import print_function
+
+import os
+import shutil
+
+from math import floor
+
+from ...workspace_factory import workspace_factory
+
+from ....utils import in_temporary_directory
+from ....utils import assert_cmd_success
+from ....utils import assert_cmd_failure
+from ....utils import assert_files_exist
+from ....utils import catkin_success
+from ....utils import catkin_failure
+from ....utils import redirected_stdio
+
+
+from ....workspace_assertions import assert_workspace_initialized
+from ....workspace_assertions import assert_no_warnings
+
+TEST_DIR = os.path.dirname(__file__)
+RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
+
+BUILD = ['build', '--no-notify', '--no-status']
+CLEAN = ['clean', '--all', '--force']  # , '--no-notify', '--no-color', '--no-status']
+
+
+def test_cmake_args():
+    """Test passing CMake args to packages."""
+    with workspace_factory() as wf:
+        shutil.copytree(os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'cmake_args'), 'src')
+
+        # the cmake_args package requires all three vars to be set
+        assert catkin_failure(
+            BUILD +
+            ['cmake_args', '--no-deps'] +
+            ['--cmake-args', '-DVAR1=VAL1'])
+
+        assert catkin_failure(
+            BUILD +
+            ['cmake_args', '--no-deps'] +
+            ['--cmake-args', '-DVAR1=VAL1', '-DVAR2=VAL2'])
+
+        assert catkin_success(
+            BUILD +
+            ['cmake_args', '--no-deps'] +
+            ['--cmake-args', '-DVAR1=VAL1', '-DVAR2=VAL2', '-DVAR3=VAL3'])
+
+        assert catkin_success(
+            BUILD +
+            ['cmake_args', '--no-deps'] +
+            ['--cmake-args', '-DVAR1=VAL1', '-DVAR2=VAL2', '-DVAR3=VAL3', '--'])
+
+
+def test_no_cmake_args():
+    """Test building when disabling CMake args in the config."""
+    pass  # TODO: Add test which sets cmake args with catkin config, then ignores them when building
+
+
+def test_additional_cmake_args():
+    """Test building with additional CMake args beyond those in the config."""
+    pass  # TODO: Add test which sets cmake args with catkin config, then adds more when building
+
+
+def test_make_args():
+    """Test passing arguments to the make command."""
+    pass  # TODO: Implement this
+
+
+def test_additional_make_args():
+    """Test building with additional make args beyond those in the config."""
+    pass  # TODO: Add test which sets make args with catkin config, then adds more when building
+
+
+def test_no_make_args():
+    """Test building when disabling make args in the config."""
+    pass  # TODO: Add test which sets make args with catkin config, then ignores them when building
+
+
+def test_catkin_make_args():
+    """Test passing arguments to the make command for catkin packages only."""
+    pass  # TODO: Implement this
+
+
+def test_additional_catkin_make_args():
+    """Test building with additional catkin make args beyond those in the config."""
+    pass  # TODO: Add test which sets catkin make args with catkin config, then adds more when building
+
+
+def test_no_catkin_make_args():
+    """Test building when disabling catkin make args in the config."""
+    pass  # TODO: Add test which sets catkin make args with catkin config, then ignores them when building
+
+
+def test_jobs_args():
+    """Test parallel jobs and parallel packages args."""
+    pass  # TODO: Run catkin build, then check JobServer singleton
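One way the make-args TODO above might eventually be filled in — a sketch only, using nothing beyond the helpers this patch already provides (`workspace_factory`, `catkin_success`, `BUILD`) plus make's standard `VERBOSE=1` variable:

```
def test_make_args_sketch():
    """Sketch: extra make args should pass through without breaking the build."""
    with workspace_factory() as wf:
        wf.create_package('pkg_a', build_type='catkin')
        wf.build()
        # VERBOSE=1 is understood by CMake-generated Makefiles, so the
        # build should still succeed with it appended.
        assert catkin_success(BUILD + ['--make-args', 'VERBOSE=1'])
```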
diff --git a/tests/system/verbs/catkin_build/test_build.py b/tests/system/verbs/catkin_build/test_build.py
new file mode 100644
index 00000000..e763d192
--- /dev/null
+++ b/tests/system/verbs/catkin_build/test_build.py
@@ -0,0 +1,203 @@
+
+from __future__ import print_function
+
+import os
+import shutil
+
+from math import floor
+
+from ...workspace_factory import workspace_factory
+
+from ....utils import in_temporary_directory
+from ....utils import assert_cmd_success
+from ....utils import assert_cmd_failure
+from ....utils import assert_files_exist
+from ....utils import catkin_success
+from ....utils import catkin_failure
+from ....utils import redirected_stdio
+
+
+from ....workspace_assertions import assert_workspace_initialized
+from ....workspace_assertions import assert_no_warnings
+
+TEST_DIR = os.path.dirname(__file__)
+RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
+
+BUILD = ['build', '--no-notify', '--no-status']
+CLEAN = ['clean', '--all']  # , '--force'] # , '--no-notify', '--no-color', '--no-status']
+
+BUILD_TYPES = ['cmake', 'catkin']
+
+
+def create_flat_workspace(wf, build_type, n_pkgs):
+    """Create a bunch of packages with no interdependencies"""
+    for i in range(n_pkgs):
+        wf.create_package('pkg_{}'.format(i))
+
+
+def create_chain_workspace(wf, build_type, n_pkgs):
+    """Create a bunch of packages, each of which depends on one other in the
+    workspace except for the root."""
+    for i in range(n_pkgs):
+        wf.create_package(
+            'pkg_{}'.format(i),
+            depends=(['pkg_{}'.format(i - 1)] if i > 0 else []))
+
+
+def create_tree_workspace(wf, build_type, n_pkg_layers, n_children=2):
+    """Create a bunch of packages which form a balanced dependency tree"""
+    n_pkgs = pow(n_children, n_pkg_layers + 1) - 1
+    for i in range(n_pkgs):
+        wf.create_package(
+            'pkg_{}'.format(i),
+            depends=(['pkg_{}'.format(int(floor((i - 1) / n_children)))] if i > 0 else []))
+    return n_pkgs
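The tree layout for `n_children = 2` is a binary heap: the parent of `pkg_i` is `pkg_{(i - 1) // 2}`, and `n_pkg_layers = 3` yields `2**(3 + 1) - 1 = 15` packages. A quick check of the wiring:

```
# Parent index used by create_tree_workspace above, for a binary tree:
for i in range(1, 7):
    print('pkg_{} depends on pkg_{}'.format(i, (i - 1) // 2))
# pkg_1 depends on pkg_0
# pkg_2 depends on pkg_0
# pkg_3 depends on pkg_1
# pkg_4 depends on pkg_1
# pkg_5 depends on pkg_2
# pkg_6 depends on pkg_2
```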
+
+
+@in_temporary_directory
+def test_build_no_src():
+    """Calling catkin build without a source space should fail."""
+    assert catkin_failure(BUILD)
+
+
+def test_build_auto_init_no_pkgs():
+    """Test automatically initializing a workspace with no packages."""
+    with redirected_stdio() as (out, err):
+        with workspace_factory() as wf:
+            wf.build()
+            assert catkin_failure(BUILD)
+            assert_workspace_initialized('.')
+            assert_no_warnings(out)
+
+
+def test_build_auto_init_with_pkg():
+    """Test automatically initializing a workspace."""
+    with redirected_stdio() as (out, err):
+        with workspace_factory() as wf:
+            wf.create_package('pkg_a')
+            wf.build()
+            assert catkin_success(BUILD)
+            assert_workspace_initialized('.')
+            assert_no_warnings(out)
+
+
+def test_build_dry_run():
+    """Test showing the build jobs without doing anything."""
+    with redirected_stdio() as (out, err):
+        for build_type in BUILD_TYPES:
+            with workspace_factory() as wf:
+                n_pkgs = create_tree_workspace(wf, build_type, 3)
+                wf.build()
+                assert catkin_success(BUILD + ['--dry-run'])
+                assert not os.path.exists('build')
+                assert not os.path.exists('devel')
+
+
+def test_build_all_isolated():
+    """Test building all packages in an isolated workspace"""
+    pass  # TODO: Implement test
+
+
+def test_build_all_merged():
+    """Test building all packages in a merged workspace"""
+    pass  # TODO: Implement test
+
+
+def test_build_pkg():
+    """Test building a package by name."""
+    with redirected_stdio() as (out, err):
+        for build_type in BUILD_TYPES:
+            with workspace_factory() as wf:
+                create_chain_workspace(wf, build_type, 4)
+                wf.build()
+                assert catkin_failure(BUILD + ['pkg_nil'])
+                assert catkin_success(BUILD + ['pkg_2'])
+                assert os.path.exists(os.path.join('build', 'pkg_0'))
+                assert os.path.exists(os.path.join('build', 'pkg_1'))
+                assert os.path.exists(os.path.join('build', 'pkg_2'))
+                assert not os.path.exists(os.path.join('build', 'pkg_3'))
+
+
+def test_build_no_deps():
+    """Test building a package by name without deps."""
+    with redirected_stdio() as (out, err):
+        for build_type in BUILD_TYPES:
+            with workspace_factory() as wf:
+                create_chain_workspace(wf, build_type, 3)
+                wf.build()
+
+                # --no-deps needs an argument
+                assert catkin_failure(BUILD + ['--no-deps'])
+                # only pkg_2 should be built
+                assert catkin_success(BUILD + ['pkg_2', '--no-deps'])
+                assert os.path.exists(os.path.join('build', 'pkg_2'))
+                assert not os.path.exists(os.path.join('build', 'pkg_1'))
+                assert not os.path.exists(os.path.join('build', 'pkg_0'))
+
+
+def test_build_start_with():
+    """Test building all packages starting with a specific one."""
+    with redirected_stdio() as (out, err):
+        for build_type in BUILD_TYPES:
+            with workspace_factory() as wf:
+                create_chain_workspace(wf, build_type, 4)
+                wf.build()
+
+                # --start-with needs an argument
+                assert catkin_failure(BUILD + ['--start-with'])
+
+                # --start-with needs a valid package
+                assert catkin_failure(BUILD + ['--start-with', 'pkg_nil'])
+
+                # this should build all packages
+                assert catkin_success(BUILD + ['--start-with', 'pkg_0'])
+                for i in range(4):
+                    assert os.path.exists(os.path.join('build', 'pkg_{}'.format(i)))
+                assert catkin_success(CLEAN)
+
+                # this should skip pkg_2's deps
+                assert catkin_success(BUILD + ['--start-with', 'pkg_2'])
+                assert not os.path.exists(os.path.join('build', 'pkg_0'))
+                assert not os.path.exists(os.path.join('build', 'pkg_1'))
+                assert os.path.exists(os.path.join('build', 'pkg_2'))
+                assert os.path.exists(os.path.join('build', 'pkg_3'))
+                assert catkin_success(CLEAN)
+
+
+def test_unbuilt_isolated():
+    """Test building unbuilt packages with an isolated develspace."""
+    pass  # TODO: This should succeed, but isn't implemented for isolated develspaces
+
+
+def test_unbuilt_merged():
+    """Test building unbuilt packages with a merged develspace."""
+    pass  # TODO: This should fail, but the check hasn't been tested
+
+
+def test_continue_on_failure():
+    """Test behavior when some packages fail to build."""
+    pass  # 
TODO: Write test + + +def test_preclean(): + """Test pre-cleaning packages in a workspace.""" + pass # TODO: Write test + + +def test_force_cmake(): + """Test forcing cmake to run on packages in a workspace.""" + pass # TODO: Write test + + +def test_install(): + """Test building and installing.""" + with redirected_stdio() as (out, err): + for build_type in BUILD_TYPES: + with workspace_factory() as wf: + create_chain_workspace(wf, build_type, 2) + wf.build() + + assert catkin_success(['config', '--install']) + assert catkin_success(BUILD) + assert os.path.exists(os.path.join('install')) diff --git a/tests/system/verbs/catkin_build/test_bwlists.py b/tests/system/verbs/catkin_build/test_bwlists.py new file mode 100644 index 00000000..8c9c4267 --- /dev/null +++ b/tests/system/verbs/catkin_build/test_bwlists.py @@ -0,0 +1,42 @@ + +from __future__ import print_function + +import os +import shutil + +from math import floor + +from ...workspace_factory import workspace_factory + +from ....utils import in_temporary_directory +from ....utils import assert_cmd_success +from ....utils import assert_cmd_failure +from ....utils import assert_files_exist +from ....utils import catkin_success +from ....utils import catkin_failure +from ....utils import redirected_stdio + + +from ....workspace_assertions import assert_workspace_initialized +from ....workspace_assertions import assert_no_warnings + +TEST_DIR = os.path.dirname(__file__) +RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') + +BUILD = ['build', '--no-notify', '--no-status'] +CLEAN = ['clean', '--all', '--force'] # , '--no-notify', '--no-color', '--no-status'] + + +def test_whitelist(): + """Test building whitelisted packages only""" + pass # TODO: Write test + + +def test_blacklist(): + """Test building all packages except blacklisted packages""" + pass # TODO: Write test + + +def test_blacklist_whitelist(): + """Test building with non-empty blacklist and whitelist""" + pass # TODO: Write test diff --git a/tests/system/verbs/catkin_build/test_context.py b/tests/system/verbs/catkin_build/test_context.py new file mode 100644 index 00000000..e8bc76f5 --- /dev/null +++ b/tests/system/verbs/catkin_build/test_context.py @@ -0,0 +1,37 @@ + +from __future__ import print_function + +import os +import shutil + +from math import floor + +from ...workspace_factory import workspace_factory + +from ....utils import in_temporary_directory +from ....utils import assert_cmd_success +from ....utils import assert_cmd_failure +from ....utils import assert_files_exist +from ....utils import catkin_success +from ....utils import catkin_failure +from ....utils import redirected_stdio + + +from ....workspace_assertions import assert_workspace_initialized +from ....workspace_assertions import assert_no_warnings + +TEST_DIR = os.path.dirname(__file__) +RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') + +BUILD = ['build', '--no-notify', '--no-status'] +CLEAN = ['clean', '--all', '--force'] # , '--no-notify', '--no-color', '--no-status'] + + +def test_build_this(): + """Test package context awareness""" + pass # TODO: Implement this (both negative and positive results) + + +def test_start_with_this(): + """Test package context awareness for --start-with option""" + pass # TODO: Implement this (both negative and positive results) diff --git a/tests/system/verbs/catkin_build/test_eclipse.py b/tests/system/verbs/catkin_build/test_eclipse.py new file mode 100644 index 00000000..a5729a17 --- /dev/null +++ 
b/tests/system/verbs/catkin_build/test_eclipse.py @@ -0,0 +1,38 @@ + +from __future__ import print_function + +import os +import shutil + +from ....utils import in_temporary_directory +from ....utils import assert_cmd_success +from ....utils import assert_cmd_failure +from ....utils import assert_files_exist +from ....utils import catkin_success +from ....utils import catkin_failure +from ....utils import redirected_stdio + + +from ....workspace_assertions import assert_workspace_initialized +from ....workspace_assertions import assert_no_warnings + +TEST_DIR = os.path.dirname(__file__) +RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') + + +@in_temporary_directory +def test_build_eclipse(): + cwd = os.getcwd() + source_space = os.path.join(cwd, 'src') + print("Creating source directory: %s" % source_space) + os.mkdir(source_space) + with redirected_stdio() as (out, err): + assert catkin_success( + ['create', 'pkg', '--rosdistro', 'hydro', '-p', source_space, 'pkg_a']), 'create pkg' + assert catkin_success( + ['build', '--no-notify', '--no-status', '--verbose', + '--cmake-args', '-GEclipse CDT4 - Unix Makefiles']) + assert_no_warnings(out) + assert_workspace_initialized('.') + assert_files_exist(os.path.join(cwd, 'build', 'pkg_a'), + ['.project', '.cproject']) diff --git a/tests/system/verbs/catkin_build/test_modify_ws.py b/tests/system/verbs/catkin_build/test_modify_ws.py new file mode 100644 index 00000000..f4cb5446 --- /dev/null +++ b/tests/system/verbs/catkin_build/test_modify_ws.py @@ -0,0 +1,52 @@ + +from __future__ import print_function + +import os +import shutil + +from math import floor + +from ...workspace_factory import workspace_factory + +from ....utils import in_temporary_directory +from ....utils import assert_cmd_success +from ....utils import assert_cmd_failure +from ....utils import assert_files_exist +from ....utils import catkin_success +from ....utils import catkin_failure +from ....utils import redirected_stdio + + +from ....workspace_assertions import assert_workspace_initialized +from ....workspace_assertions import assert_no_warnings + +TEST_DIR = os.path.dirname(__file__) +RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') + +BUILD = ['build', '--no-notify', '--no-status'] +CLEAN = ['clean', '--all', '--force'] # , '--no-notify', '--no-color', '--no-status'] + + +def test_add_package(): + """Test build behavior when adding packages to the workspace""" + pass # TODO: Implement this for various dependency relationships + + +def test_remove_package(): + """Test build behavior when removing packages from the workspace""" + pass # TODO: Implement this for various dependency relationships + + +def test_rename_package(): + """Test build behavior when renaming a package in the workspace""" + pass # TODO: Implement this for various dependency relationships + + +def test_ignore_package(): + """Test build behavior when adding a CATKIN_IGNORE file to a package in the workspace""" + pass # TODO: Implement this for various dependency relationships + + +def test_deblacklist(): + """Test build behavior when removing a package from the blacklist that has yet to be built""" + pass # TODO: Implement this for various dependency relationships diff --git a/tests/system/verbs/catkin_build/test_unit_tests.py b/tests/system/verbs/catkin_build/test_unit_tests.py new file mode 100644 index 00000000..9d1c01a2 --- /dev/null +++ b/tests/system/verbs/catkin_build/test_unit_tests.py @@ -0,0 +1,55 @@ + +from __future__ import 
print_function + +import os +import shutil + +from ....utils import in_temporary_directory +from ....utils import assert_cmd_success +from ....utils import assert_cmd_failure +from ....utils import assert_files_exist +from ....utils import catkin_success +from ....utils import catkin_failure +from ....utils import redirected_stdio + + +from ....workspace_assertions import assert_workspace_initialized +from ....workspace_assertions import assert_no_warnings + +TEST_DIR = os.path.dirname(__file__) +RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') + + +@in_temporary_directory +def test_build_pkg_unit_tests(): + """Test running working unit tests""" + cwd = os.getcwd() + source_space = os.path.join(cwd, 'src') + shutil.copytree(os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'python_tests'), source_space) + with redirected_stdio() as (out, err): + assert catkin_success( + ['build', '--no-notify', '--no-status', '--verbose', '--no-deps', + 'python_tests', '--make-args', 'run_tests']) + assert_cmd_success(['catkin_test_results', 'build/python_tests']) + + assert catkin_success( + ['run_tests', 'python_tests', '--no-deps', '--no-notify', '--no-status']) + assert_cmd_success(['catkin_test_results', 'build/python_tests']) + + +@in_temporary_directory +def test_build_pkg_unit_tests_broken(): + """Test running broken unit tests""" + cwd = os.getcwd() + source_space = os.path.join(cwd, 'src') + shutil.copytree(os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'python_tests_err'), source_space) + + with redirected_stdio() as (out, err): + assert catkin_success( + ['build', '--no-notify', '--no-status', '--verbose', '--no-deps', + 'python_tests_err', '--make-args', 'run_tests']) + assert_cmd_failure(['catkin_test_results', 'build/python_tests_err']) + + assert catkin_success( + ['run_tests', 'python_tests_err', '--no-deps', '--no-notify', '--no-status']) + assert_cmd_failure(['catkin_test_results', 'build/python_tests_err']) diff --git a/tests/system/verbs/catkin_build/test_whitespace_in_paths.py b/tests/system/verbs/catkin_build/test_whitespace_in_paths.py index 22ca41e6..cecdfe6c 100644 --- a/tests/system/verbs/catkin_build/test_whitespace_in_paths.py +++ b/tests/system/verbs/catkin_build/test_whitespace_in_paths.py @@ -2,35 +2,27 @@ from catkin_tools.commands.catkin import main +from ....utils import catkin_success +from ....utils import catkin_failure + from ...workspace_factory import workspace_factory def test_catkin_build_with_whitespace_in_paths(): with workspace_factory(source_space='source packages') as wf: - wf.add_package('foo', depends=['bar']) - wf.add_package('bar') + wf.create_package('foo', depends=['bar']) + wf.create_package('bar') wf.build() + print('Workspace: {0}'.format(wf.workspace)) + assert os.path.isdir(wf.workspace) + cmd = ['config', '--source', wf.source_space, '--devel', 'devel space', '--build', 'build space', '--install-space', 'install space'] - try: - main(cmd) - except SystemExit as exc: - ret = exc.code - if ret != 0: - import traceback - traceback.print_exc() - assert ret == 0, cmd + assert catkin_success(cmd), cmd cmd = ['build', '--no-status', '--no-notify', '--verbose'] - try: - ret = main(cmd) - except SystemExit as exc: - ret = exc.code - if ret != 0: - import traceback - traceback.print_exc() - assert ret == 0, cmd + assert catkin_success(cmd), cmd diff --git a/tests/integrated/__init__.py b/tests/system/verbs/catkin_config/__init__.py similarity index 100% rename from tests/integrated/__init__.py rename to 
tests/system/verbs/catkin_config/__init__.py
diff --git a/tests/integrated/test_config.py b/tests/system/verbs/catkin_config/test_config.py
similarity index 71%
rename from tests/integrated/test_config.py
rename to tests/system/verbs/catkin_config/test_config.py
index 098a37ae..98886c50 100644
--- a/tests/integrated/test_config.py
+++ b/tests/system/verbs/catkin_config/test_config.py
@@ -1,11 +1,11 @@
 import os
 
-from ..utils import in_temporary_directory
-from ..utils import assert_cmd_success
+from ....utils import in_temporary_directory
+from ....utils import assert_cmd_success
 
-from ..workspace_assertions import assert_workspace_initialized
-from ..workspace_assertions import assert_warning_message
-from ..workspace_assertions import assert_no_warnings
+from ....workspace_assertions import assert_workspace_initialized
+from ....workspace_assertions import assert_warning_message
+from ....workspace_assertions import assert_no_warnings
 
 
 @in_temporary_directory
diff --git a/tests/system/verbs/catkin_init/__init__.py b/tests/system/verbs/catkin_init/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/integrated/test_init.py b/tests/system/verbs/catkin_init/test_init.py
similarity index 63%
rename from tests/integrated/test_init.py
rename to tests/system/verbs/catkin_init/test_init.py
index 75b61f5f..150de52e 100644
--- a/tests/integrated/test_init.py
+++ b/tests/system/verbs/catkin_init/test_init.py
@@ -1,11 +1,11 @@
 import os
 
-from ..utils import in_temporary_directory
-from ..utils import assert_cmd_success
+from ....utils import in_temporary_directory
+from ....utils import assert_cmd_success
 
-from ..workspace_assertions import assert_workspace_initialized
-from ..workspace_assertions import assert_warning_message
-from ..workspace_assertions import assert_no_warnings
+from ....workspace_assertions import assert_workspace_initialized
+from ....workspace_assertions import assert_warning_message
+from ....workspace_assertions import assert_no_warnings
 
 
 @in_temporary_directory
diff --git a/tests/system/workspace_factory.py b/tests/system/workspace_factory.py
index ae490aaf..9716568f 100644
--- a/tests/system/workspace_factory.py
+++ b/tests/system/workspace_factory.py
@@ -5,6 +5,7 @@
 
 
 class workspace_factory(temporary_directory):
+
     def __init__(self, source_space='src', prefix=''):
         super(workspace_factory, self).__init__(prefix=prefix)
         self.source_space = source_space
@@ -19,22 +20,92 @@ def __exit__(self, exc_type, exc_value, traceback):
 
 
 class WorkspaceFactory(object):
-    def __init__(self, workspace, source_space):
+
+    def __init__(self, workspace, source_space='src'):
         self.workspace = workspace
         self.source_space = os.path.join(self.workspace, source_space)
         self.packages = {}
 
     class Package(object):
-        def __init__(self, name, depends, build_depends, run_depends, test_depends):
+
+        PACKAGE_XML_TEMPLATE = """\
+<?xml version="1.0"?>
+<package>
+  <name>{name}</name>
+  <version>0.0.0</version>
+  <description>
+    Description for {name}
+  </description>
+
+  <maintainer email="person@todo.todo">Firstname Lastname</maintainer>
+  <license>MIT</license>
+
+{depends_xml}
+
+{export_xml}
+
+</package>
+"""
+        PACKAGE_XML_EXPORT_TEMPLATE = """
+  <export>
+    <build_type>{build_type}</build_type>
+  </export>"""
+
+        def __init__(self, name, build_type, depends, build_depends, run_depends, test_depends):
             self.name = name
+            self.build_type = build_type
             self.build_depends = (build_depends or []) + (depends or [])
             self.run_depends = (run_depends or []) + (depends or [])
             self.test_depends = (test_depends or [])
 
-    def add_package(self, pkg_name, depends=None, build_depends=None, run_depends=None, test_depends=None):
-        self.packages[pkg_name] = self.Package(pkg_name, depends, build_depends, run_depends, test_depends)
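For reference, a hypothetical `create_package('pkg_a', build_type='cmake', depends=['pkg_b'])` call renders the templates above roughly as follows (shown as a comment; the maintainer and description filler values are illustrative):

```
# <?xml version="1.0"?>
# <package>
#   <name>pkg_a</name>
#   <version>0.0.0</version>
#   <description>
#     Description for pkg_a
#   </description>
#
#   <maintainer email="person@todo.todo">Firstname Lastname</maintainer>
#   <license>MIT</license>
#
#   <build_depend>pkg_b</build_depend>
#   <run_depend>pkg_b</run_depend>
#
#   <export>
#     <build_type>cmake</build_type>
#   </export>
#
# </package>
```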
+        def get_package_xml(self):
+            # Get dependencies
+            depends_xml = '\n'.join(
+                ['  <build_depend>{0}</build_depend>'.format(x) for x in self.build_depends] +
+                ['  <run_depend>{0}</run_depend>'.format(x) for x in self.run_depends] +
+                ['  <test_depend>{0}</test_depend>'.format(x) for x in self.test_depends]
+            )
+
+            # Get exports section
+            if self.build_type == 'catkin':
+                export_xml = ''
+            else:
+                export_xml = self.PACKAGE_XML_EXPORT_TEMPLATE.format(build_type=self.build_type)
+
+            # Format the package.xml template
+            return self.PACKAGE_XML_TEMPLATE.format(
+                name=self.name,
+                depends_xml=depends_xml,
+                export_xml=export_xml)
+
+        def get_cmakelists_txt(self):
+            if self.build_type == 'catkin':
+                cmakelists_txt = """\
+cmake_minimum_required(VERSION 2.8.3)
+project({name})
+find_package(catkin REQUIRED)
+catkin_package()
+add_custom_target(install)"""
+            elif self.build_type == 'cmake':
+                cmakelists_txt = """\
+cmake_minimum_required(VERSION 2.8.3)
+project({name})
+add_custom_target(install)"""
+
+            return cmakelists_txt.format(
+                name=self.name,
+                find_package=' '.join(self.build_depends))
+
+    def add_package(self, pkg_name, package_path):
+        """Copy a static package into the workspace"""
+        shutil.copytree(package_path, os.path.join(self.source_space, pkg_name))
+
+    def create_package(self, pkg_name, build_type='cmake', depends=None, build_depends=None, run_depends=None, test_depends=None):
+        """Add a package to be generated in this workspace."""
+        self.packages[pkg_name] = self.Package(pkg_name, build_type, depends, build_depends, run_depends, test_depends)
 
     def build(self):
+        """Generate workspace paths and packages."""
         cwd = os.getcwd()
         if not os.path.isdir(self.workspace):
             if os.path.exists(self.workspace):
@@ -50,42 +121,11 @@ def build(self):
             pkg_dir = os.path.join(self.source_space, name)
             os.makedirs(pkg_dir)
             pkg_xml_path = os.path.join(pkg_dir, 'package.xml')
-            pkg_xml = """\
-<?xml version="1.0"?>
-<package>
-  <name>{name}</name>
-  <version>0.0.0</version>
-  <description>
-    Description for {name}
-  </description>
-
-  <maintainer email="person@todo.todo">Firstname Lastname</maintainer>
-  <license>MIT</license>
-
-"""
-            pkg_xml += '\n'.join(
-                ['  <build_depend>{0}</build_depend>'.format(x) for x in pkg.build_depends] +
-                ['  <run_depend>{0}</run_depend>'.format(x) for x in pkg.run_depends] +
-                ['  <test_depend>{0}</test_depend>'.format(x) for x in pkg.test_depends]
-            )
-            pkg_xml += """
-
-  <export>
-    <build_type>cmake</build_type>
-  </export>
-</package>
-"""
             with open(pkg_xml_path, 'w') as f:
-                f.write(pkg_xml.format(name=name))
+                f.write(pkg.get_package_xml())
             cmakelists_txt_path = os.path.join(pkg_dir, 'CMakeLists.txt')
-            cmakelists_txt = """\
-cmake_minimum_required(VERSION 2.8.3)
-project({name})
-
-add_custom_target(install)
-
-"""
             with open(cmakelists_txt_path, 'w') as f:
-                f.write(cmakelists_txt.format(name=name, find_package=' '.join(pkg.build_depends)))
+                f.write(pkg.get_cmakelists_txt())
         finally:
             os.chdir(cwd)
diff --git a/tests/unit/test_runner.py b/tests/unit/test_runner.py
deleted file mode 100644
index 51492b34..00000000
--- a/tests/unit/test_runner.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import os
-import mock
-
-from catkin_tools.runner.run_unix import run_command
-
-from nose.tools import eq_ as assert_eq
-
-TEST_DIR = os.path.dirname(__file__)
-
-
-@mock.patch('catkin_tools.runner.run_unix.run_command')
-def test_runner_ascii(patched_func):
-    cmd = ['cat', os.path.join(TEST_DIR, 'ascii_text.txt')]
-    for line in run_command(cmd):
-        if type(line) == int:
-            assert_eq(line, 0)
-        else:
-            assert_eq(line.rstrip(), 'Hello ASCII!')
-
-
-@mock.patch('catkin_tools.runner.run_unix.run_command')
-def test_runner_unicode(patched_func):
-    cmd = ['cat', os.path.join(TEST_DIR, 'unicode_text.txt')]
-    for line in run_command(cmd):
-        if type(line) == int:
-            assert_eq(line, 0)
-        else:
-            if line.rstrip() != 'Hello Unicode\u203d':
-                print('WARNING: Unicode reading not supported!')
diff --git a/tests/utils.py b/tests/utils.py
index fe0164fa..58e185fb 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -10,6 +10,8 @@
 
 import subprocess
 
+from catkin_tools.commands.catkin import main as catkin_main
+
 try:
     # Python2
     from StringIO import StringIO
@@ -27,6 +29,32 @@ class TimeoutExpired(object):
 MOCK_DIR = os.path.join(TESTS_DIR, 'mock_resources')
 
 
+def catkin_success(args, env={}):
+    orig_environ = dict(os.environ)
+    ret = 0
+    try:
+        os.environ.update(env)
+        catkin_main(args)
+    except SystemExit as exc:
+        ret = exc.code
+        if ret != 0:
+            import traceback
+            traceback.print_exc()
+    finally:
+        os.environ = orig_environ
+    return ret == 0
+
+
+def catkin_failure(args, env={}):
+    orig_environ = dict(os.environ)
+    ret = 0
+    try:
+        os.environ.update(env)
+        catkin_main(args)
+    except SystemExit as exc:
+        ret = exc.code
+    finally:
+        os.environ = orig_environ
+    return ret != 0
+
+
 class AssertRaisesContext(object):
 
     def __init__(self, expected, expected_regex=None):
@@ -64,14 +92,18 @@ class redirected_stdio(object):
 
     def __enter__(self):
         self.original_stdout = sys.stdout
         self.original_stderr = sys.stderr
-        sys.stdout = out = StringIO()
-        sys.stderr = err = StringIO()
-        return out, err
+        self.out = StringIO()
+        self.err = StringIO()
+        sys.stdout = self.out
+        sys.stderr = self.err
+        return self.out, self.err
 
     def __exit__(self, exc_type, exc_value, traceback):
         sys.stdout = self.original_stdout
         sys.stderr = self.original_stderr
+        print(self.out.getvalue())
 
 
 class temporary_directory(object):

From 8e2df65a5c3fc6b61f424547972137a1e55d9e2c Mon Sep 17 00:00:00 2001
From: Jonathan Bohren
Date: Mon, 14 Dec 2015 20:49:19 -0500
Subject: [PATCH 02/11] cli: Fixing poorly caught interrupt in catkin command

---
 catkin_tools/commands/catkin.py | 266 ++++++++++++++++----------------
 1 file changed, 135 insertions(+), 131 deletions(-)

diff --git a/catkin_tools/commands/catkin.py b/catkin_tools/commands/catkin.py
index e1eca139..369d574a 100644
--- a/catkin_tools/commands/catkin.py
+++ b/catkin_tools/commands/catkin.py
@@ -82,138 +82,142 @@ def create_subparsers(parser, verbs):
 
 
 def main(sysargs=None):
-    # Initialize config
     try:
-        initialize_config()
-    except RuntimeError as exc:
-        sys.exit("Failed to initialize config: {0}".format(exc))
-
-    # Create a top level parser
-    parser = argparse.ArgumentParser(description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter)
-    add = parser.add_argument
-    add('-a', '--list-aliases', action="store_true", default=False,
-        help="Lists the current verb aliases and then quits, all other arguments are ignored")
-    add('--test-colors', action='store_true', default=False,
-        help="Prints a color test pattern to the screen and then quits, all other arguments are ignored")
-    color_control_group = parser.add_mutually_exclusive_group()
-    add = color_control_group.add_argument
-    add('--force-color', action='store_true', default=False,
-        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
-    add('--no-color', action='store_true', default=False,
-        help='Forces catkin to not use color in the output, regardless of the detect terminal type.')
-
-    # Generate a list of verbs available
-    verbs = list_verbs()
-
-    # Create the subparsers for each verb and collect the argument preprocessors
-    argument_preprocessors = create_subparsers(parser, verbs)
-
-    # Get verb aliases
-    verb_aliases = get_verb_aliases()
-
-    # Setup sysargs
-    sysargs = sys.argv[1:] if sysargs is None else sysargs
cmd = os.path.basename(sys.argv[0]) - - # Get colors config - no_color = False - force_color = False - for arg in sysargs: - if arg == '--no-color': - no_color = True - if arg == '--force-color': - force_color = True - - if no_color or not force_color and not is_tty(sys.stdout): - set_color(False) - - # Check for --test-colors - for arg in sysargs: - if arg == '--test-colors': - test_colors() - sys.exit(0) - if not arg.startswith('-'): - break - - # Check for --list-aliases - for arg in sysargs: - if arg == '--list-aliases' or arg == '-a': - for alias in sorted(list(verb_aliases.keys())): - print("{0}: {1}".format(alias, verb_aliases[alias])) - sys.exit(0) - if not arg.startswith('-'): - break - - # Do alias expansion - expanding_verb_aliases = True - used_aliases = [] - while expanding_verb_aliases: - expanding_verb_aliases = False - for index, arg in enumerate(sysargs): + # Initialize config + try: + initialize_config() + except RuntimeError as exc: + sys.exit("Failed to initialize config: {0}".format(exc)) + + # Create a top level parser + parser = argparse.ArgumentParser( + description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter) + add = parser.add_argument + add('-a', '--list-aliases', action="store_true", default=False, + help="Lists the current verb aliases and then quits, all other arguments are ignored") + add('--test-colors', action='store_true', default=False, + help="Prints a color test pattern to the screen and then quits, all other arguments are ignored") + color_control_group = parser.add_mutually_exclusive_group() + add = color_control_group.add_argument + add('--force-color', action='store_true', default=False, + help='Forces catkin to output in color, even when the terminal does not appear to support it.') + add('--no-color', action='store_true', default=False, + help='Forces catkin to not use color in the output, regardless of the detect terminal type.') + + # Generate a list of verbs available + verbs = list_verbs() + + # Create the subparsers for each verb and collect the argument preprocessors + argument_preprocessors = create_subparsers(parser, verbs) + + # Get verb aliases + verb_aliases = get_verb_aliases() + + # Setup sysargs + sysargs = sys.argv[1:] if sysargs is None else sysargs + cmd = os.path.basename(sys.argv[0]) + + # Get colors config + no_color = False + force_color = False + for arg in sysargs: + if arg == '--no-color': + no_color = True + if arg == '--force-color': + force_color = True + + if no_color or not force_color and not is_tty(sys.stdout): + set_color(False) + + # Check for --test-colors + for arg in sysargs: + if arg == '--test-colors': + test_colors() + sys.exit(0) + if not arg.startswith('-'): + break + + # Check for --list-aliases + for arg in sysargs: + if arg == '--list-aliases' or arg == '-a': + for alias in sorted(list(verb_aliases.keys())): + print("{0}: {1}".format(alias, verb_aliases[alias])) + sys.exit(0) if not arg.startswith('-'): - if arg in used_aliases: - print(fmt( - "@!@{gf}==>@| Expanding alias '@!@{yf}" + - arg + - "@|' was previously expanded, ignoring this time to prevent infinite recursion." 
- )) - if arg in verb_aliases: - before = [] if index == 0 else sysargs[:index - 1] - after = [] if index == len(sysargs) else sysargs[index + 1:] - sysargs = before + verb_aliases[arg].split() + after - print(fmt( - "@!@{gf}==>@| Expanding alias " - "'@!@{yf}{alias}@|' " - "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' " - "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'" - ).format( - alias=arg, - expansion=verb_aliases[arg], - before=' '.join([cmd] + before), - after=(' '.join([''] + after) if after else '') - )) - expanding_verb_aliases = True - # Prevent the alias from being used again, to prevent infinite recursion - used_aliases.append(arg) - del verb_aliases[arg] break - # Determine the verb, splitting arguments into pre and post verb - verb = None - pre_verb_args = [] - post_verb_args = [] - for index, arg in enumerate(sysargs): - # If the arg does not start with a `-` then it is a positional argument - # The first positional argument must be the verb - if not arg.startswith('-'): - verb = arg - post_verb_args = sysargs[index + 1:] - break - # If the `-h` or `--help` option comes before the verb, parse_args - if arg in ['-h', '--help']: - parser.parse_args(sysargs) - # Otherwise it is a pre-verb option - pre_verb_args.append(arg) - - # Error on no verb provided - if verb is None: - print(parser.format_usage()) - sys.exit("Error: No verb provided.") - # Error on unknown verb provided - if verb not in verbs: - print(parser.format_usage()) - sys.exit("Error: Unknown verb '{0}' provided.".format(verb)) - - # First allow the verb's argument preprocessor to strip any args - # and return any "extra" information it wants as a dict - processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args) - # Then allow argparse to process the left over post-verb arguments along - # with the pre-verb arguments and the verb itself - args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args) - # Extend the argparse result with the extras from the preprocessor - for key, value in extras.items(): - setattr(args, key, value) - - # Finally call the subparser's main function with the processed args - # and the extras which the preprocessor may have returned - sys.exit(args.main(args) or 0) + # Do alias expansion + expanding_verb_aliases = True + used_aliases = [] + while expanding_verb_aliases: + expanding_verb_aliases = False + for index, arg in enumerate(sysargs): + if not arg.startswith('-'): + if arg in used_aliases: + print(fmt( + "@!@{gf}==>@| Expanding alias '@!@{yf}" + + arg + + "@|' was previously expanded, ignoring this time to prevent infinite recursion." 
+ )) + if arg in verb_aliases: + before = [] if index == 0 else sysargs[:index - 1] + after = [] if index == len(sysargs) else sysargs[index + 1:] + sysargs = before + verb_aliases[arg].split() + after + print(fmt( + "@!@{gf}==>@| Expanding alias " + "'@!@{yf}{alias}@|' " + "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' " + "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'" + ).format( + alias=arg, + expansion=verb_aliases[arg], + before=' '.join([cmd] + before), + after=(' '.join([''] + after) if after else '') + )) + expanding_verb_aliases = True + # Prevent the alias from being used again, to prevent infinite recursion + used_aliases.append(arg) + del verb_aliases[arg] + break + + # Determine the verb, splitting arguments into pre and post verb + verb = None + pre_verb_args = [] + post_verb_args = [] + for index, arg in enumerate(sysargs): + # If the arg does not start with a `-` then it is a positional argument + # The first positional argument must be the verb + if not arg.startswith('-'): + verb = arg + post_verb_args = sysargs[index + 1:] + break + # If the `-h` or `--help` option comes before the verb, parse_args + if arg in ['-h', '--help']: + parser.parse_args(sysargs) + # Otherwise it is a pre-verb option + pre_verb_args.append(arg) + + # Error on no verb provided + if verb is None: + print(parser.format_usage()) + sys.exit("Error: No verb provided.") + # Error on unknown verb provided + if verb not in verbs: + print(parser.format_usage()) + sys.exit("Error: Unknown verb '{0}' provided.".format(verb)) + + # First allow the verb's argument preprocessor to strip any args + # and return any "extra" information it wants as a dict + processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args) + # Then allow argparse to process the left over post-verb arguments along + # with the pre-verb arguments and the verb itself + args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args) + # Extend the argparse result with the extras from the preprocessor + for key, value in extras.items(): + setattr(args, key, value) + + # Finally call the subparser's main function with the processed args + # and the extras which the preprocessor may have returned + sys.exit(args.main(args) or 0) + except KeyboardInterrupt: + print('Interrupted by user!') From c187c324a1b3fc1b1a96f62d7271b3f1e696a6d6 Mon Sep 17 00:00:00 2001 From: Jonathan Bohren Date: Mon, 14 Dec 2015 20:51:20 -0500 Subject: [PATCH 03/11] color: Making it so the fmt() function can be used without adding reset characters. This fixes output issues in some non-ANSI-compatible contexts. 
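
For illustration, a caller can now suppress the trailing reset when
concatenating several formatted fragments (hypothetical usage sketch,
not code from this patch):

    from catkin_tools.terminal_color import fmt

    # Build one line from fragments without a reset after each piece;
    # only the final fragment appends the terminating reset sequence.
    line = fmt('@!@{gf}==>@| building', reset=False) + fmt(' ...', reset=True)
    print(line)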
--- catkin_tools/terminal_color.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/catkin_tools/terminal_color.py b/catkin_tools/terminal_color.py index f7ac5872..1851ee41 100644 --- a/catkin_tools/terminal_color.py +++ b/catkin_tools/terminal_color.py @@ -124,7 +124,7 @@ def sanitize(msg): return msg -def fmt(msg): +def fmt(msg, reset=True): """Replaces color annotations with ansi escape sequences""" global _ansi msg = msg.replace('@!', '@{boldon}') @@ -132,7 +132,7 @@ def fmt(msg): msg = msg.replace('@_', '@{ulon}') msg = msg.replace('@|', '@{reset}') t = ColorTemplate(msg) - return t.substitute(_ansi) + ansi('reset') + return t.substitute(_ansi) + (ansi('reset') if reset else '') def test_colors(): From eb9a4d6b21c39474d9c200c2a6bcec8a83399e7e Mon Sep 17 00:00:00 2001 From: Jonathan Bohren Date: Mon, 14 Dec 2015 20:52:09 -0500 Subject: [PATCH 04/11] list: Removing unused imports --- catkin_tools/verbs/catkin_list/cli.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/catkin_tools/verbs/catkin_list/cli.py b/catkin_tools/verbs/catkin_list/cli.py index 09fe6d04..60d2ef94 100644 --- a/catkin_tools/verbs/catkin_list/cli.py +++ b/catkin_tools/verbs/catkin_list/cli.py @@ -14,14 +14,12 @@ from __future__ import print_function -import os import sys from catkin_tools.argument_parsing import add_context_args from catkin_tools.context import Context -from catkin_tools.metadata import find_enclosing_workspace from catkin_pkg.packages import find_packages from catkin_pkg.package import InvalidPackage From 6e0d0a75d5fda8d03dca159a917e6852610d3dbd Mon Sep 17 00:00:00 2001 From: Jonathan Bohren Date: Mon, 14 Dec 2015 20:53:52 -0500 Subject: [PATCH 05/11] context: Making context loading fail more gracefully when there are invalid keys in the saved context. Also adding some helper functions to standardize constructing package-specific workspace paths. 
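
For example, code that previously joined these paths by hand can defer
to the context (hypothetical usage sketch; `ctx` stands in for a loaded
Context and `pkg` for a catkin_pkg Package object):

    # Each helper resolves the per-package directory, honoring the
    # isolate_devel / isolate_install settings where applicable.
    build_dir = ctx.package_build_space(pkg)      # <build space>/<pkg.name>
    devel_dir = ctx.package_devel_space(pkg)      # merged or isolated devel
    install_dir = ctx.package_install_space(pkg)  # merged or isolated install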
--- catkin_tools/context.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/catkin_tools/context.py b/catkin_tools/context.py index 0715acf5..7d7e8dbf 100644 --- a/catkin_tools/context.py +++ b/catkin_tools/context.py @@ -200,7 +200,8 @@ def __init__( catkin_make_args=None, space_suffix=None, whitelist=None, - blacklist=None + blacklist=None, + **kwargs ): """Creates a new Context object, optionally initializing with parameters @@ -242,6 +243,10 @@ def __init__( """ self.__locked = False + # Check for unhandled context options + if len(kwargs) > 0: + print('Warning: Unhandled config context options: {}'.format(kwargs)) + # Validation is done on assignment # Handle *space assignment and defaults self.workspace = workspace @@ -325,7 +330,7 @@ def load_env(self): self.env_cmake_prefix_path = ':'.join(split_result_cmake_prefix_path[1:]) else: - self.env_cmake_prefix_path = os.environ.get('CMAKE_PREFIX_PATH', '') + self.env_cmake_prefix_path = os.environ.get('CMAKE_PREFIX_PATH', '').rstrip(':') # Add warnings based on conflicing CMAKE_PREFIX_PATH if self.cached_cmake_prefix_path and self.extend_path: @@ -673,3 +678,18 @@ def blacklist(self): @blacklist.setter def blacklist(self, value): self.__blacklist = value + + def package_build_space(self, package): + return os.path.join(self.build_space_abs, package.name) + + def package_devel_space(self, package): + if self.isolate_devel: + return os.path.join(self.devel_space_abs, package.name) + else: + return self.devel_space_abs + + def package_install_space(self, package): + if self.isolate_install: + return os.path.join(self.install_space_abs, package.name) + else: + return self.install_space_abs From d4fe5d20c3c03cd4b583d9e5860000ae4840c3e3 Mon Sep 17 00:00:00 2001 From: Jonathan Bohren Date: Mon, 14 Dec 2015 20:54:40 -0500 Subject: [PATCH 06/11] create: Removing unused import --- catkin_tools/verbs/catkin_create/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/catkin_tools/verbs/catkin_create/__init__.py b/catkin_tools/verbs/catkin_create/__init__.py index 7ffaf8ac..66462774 100644 --- a/catkin_tools/verbs/catkin_create/__init__.py +++ b/catkin_tools/verbs/catkin_create/__init__.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from catkin_tools.argument_parsing import argument_preprocessor from .cli import main from .cli import prepare_arguments From 407f38cde315d40decc711887ba66e5344a5ab28 Mon Sep 17 00:00:00 2001 From: William Woodall Date: Mon, 14 Dec 2015 23:08:55 -0800 Subject: [PATCH 07/11] simplify the catkin verb commands main function --- catkin_tools/commands/catkin.py | 294 +++++++++++++++++--------------- 1 file changed, 158 insertions(+), 136 deletions(-) diff --git a/catkin_tools/commands/catkin.py b/catkin_tools/commands/catkin.py index 369d574a..97f15128 100644 --- a/catkin_tools/commands/catkin.py +++ b/catkin_tools/commands/catkin.py @@ -81,143 +81,165 @@ def create_subparsers(parser, verbs): return argument_preprocessors +def expand_one_verb_alias(sysargs, verb_aliases, used_aliases): + """Iterate through sysargs looking for expandable verb aliases. + + When a verb alias is found, sysargs is modified to effectively expand the alias. + The alias is removed from verb_aliases and added to used_aliases. + After finding and expanding an alias, this function returns True. + If no alias is found to be expanded, this function returns False. 
+    """
+    cmd = os.path.basename(sys.argv[0])
+    for index, arg in enumerate(sysargs):
+        if arg.startswith('-'):
+            # Not a verb, continue through the arguments
+            continue
+        if arg in used_aliases:
+            print(fmt(
+                "@!@{gf}==>@| Expanding alias '@!@{yf}" + arg +
+                "@|' was previously expanded, ignoring this time to prevent infinite recursion."
+            ))
+        if arg in verb_aliases:
+            before = [] if index == 0 else sysargs[:index]
+            after = [] if index == len(sysargs) else sysargs[index + 1:]
+            sysargs[:] = before + verb_aliases[arg].split() + after
+            print(fmt(
+                "@!@{gf}==>@| Expanding alias "
+                "'@!@{yf}{alias}@|' "
+                "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' "
+                "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'"
+            ).format(
+                alias=arg,
+                expansion=verb_aliases[arg],
+                before=' '.join([cmd] + before),
+                after=(' '.join([''] + after) if after else '')
+            ))
+            # Prevent the alias from being used again, to prevent infinite recursion
+            used_aliases.append(arg)
+            del verb_aliases[arg]
+            # Return True since one has been found
+            return True
+    # Return False since no verb alias was found
+    return False
+
+
+def expand_verb_aliases(sysargs, verb_aliases):
+    """Expands aliases in sysargs which are found in verb_aliases until none are found."""
+    used_aliases = []
+    while expand_one_verb_alias(sysargs, verb_aliases, used_aliases):
+        pass
+    return sysargs
+
+
+def catkin_main(sysargs):
+    # Initialize config
+    try:
+        initialize_config()
+    except RuntimeError as exc:
+        sys.exit("Failed to initialize config: {0}".format(exc))
+
+    # Create a top level parser
+    parser = argparse.ArgumentParser(
+        description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter)
+    add = parser.add_argument
+    add('-a', '--list-aliases', action="store_true", default=False,
+        help="Lists the current verb aliases and then quits, all other arguments are ignored")
+    add('--test-colors', action='store_true', default=False,
+        help="Prints a color test pattern to the screen and then quits, all other arguments are ignored")
+    color_control_group = parser.add_mutually_exclusive_group()
+    add = color_control_group.add_argument
+    add('--force-color', action='store_true', default=False,
+        help='Forces catkin to output in color, even when the terminal does not appear to support it.')
+    add('--no-color', action='store_true', default=False,
+        help='Forces catkin to not use color in the output, regardless of the detected terminal type.')
+
+    # Generate a list of verbs available
+    verbs = list_verbs()
+
+    # Create the subparsers for each verb and collect the argument preprocessors
+    argument_preprocessors = create_subparsers(parser, verbs)
+
+    # Get verb aliases
+    verb_aliases = get_verb_aliases()
+
+    # Setup sysargs
+    sysargs = sys.argv[1:] if sysargs is None else sysargs
+
+    # Get colors config
+    no_color = False
+    force_color = False
+    for arg in sysargs:
+        if arg == '--no-color':
+            no_color = True
+        if arg == '--force-color':
+            force_color = True
+
+    if no_color or not force_color and not is_tty(sys.stdout):
+        set_color(False)
+
+    # Check for --test-colors
+    for arg in sysargs:
+        if arg == '--test-colors':
+            test_colors()
+            sys.exit(0)
+        if not arg.startswith('-'):
+            break
+
+    # Check for --list-aliases
+    for arg in sysargs:
+        if arg == '--list-aliases' or arg == '-a':
+            for alias in sorted(list(verb_aliases.keys())):
+                print("{0}: {1}".format(alias, verb_aliases[alias]))
+            sys.exit(0)
+        if not arg.startswith('-'):
+            break
+
+    # Do verb alias expansion
+    sysargs = expand_verb_aliases(sysargs, verb_aliases)
+
+    # 
Determine the verb, splitting arguments into pre and post verb + verb = None + pre_verb_args = [] + post_verb_args = [] + for index, arg in enumerate(sysargs): + # If the arg does not start with a `-` then it is a positional argument + # The first positional argument must be the verb + if not arg.startswith('-'): + verb = arg + post_verb_args = sysargs[index + 1:] + break + # If the `-h` or `--help` option comes before the verb, parse_args + if arg in ['-h', '--help']: + parser.parse_args(sysargs) + # Otherwise it is a pre-verb option + pre_verb_args.append(arg) + + # Error on no verb provided + if verb is None: + print(parser.format_usage()) + sys.exit("Error: No verb provided.") + # Error on unknown verb provided + if verb not in verbs: + print(parser.format_usage()) + sys.exit("Error: Unknown verb '{0}' provided.".format(verb)) + + # First allow the verb's argument preprocessor to strip any args + # and return any "extra" information it wants as a dict + processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args) + # Then allow argparse to process the left over post-verb arguments along + # with the pre-verb arguments and the verb itself + args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args) + # Extend the argparse result with the extras from the preprocessor + for key, value in extras.items(): + setattr(args, key, value) + + # Finally call the subparser's main function with the processed args + # and the extras which the preprocessor may have returned + sys.exit(args.main(args) or 0) + + def main(sysargs=None): try: - # Initialize config - try: - initialize_config() - except RuntimeError as exc: - sys.exit("Failed to initialize config: {0}".format(exc)) - - # Create a top level parser - parser = argparse.ArgumentParser( - description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter) - add = parser.add_argument - add('-a', '--list-aliases', action="store_true", default=False, - help="Lists the current verb aliases and then quits, all other arguments are ignored") - add('--test-colors', action='store_true', default=False, - help="Prints a color test pattern to the screen and then quits, all other arguments are ignored") - color_control_group = parser.add_mutually_exclusive_group() - add = color_control_group.add_argument - add('--force-color', action='store_true', default=False, - help='Forces catkin to output in color, even when the terminal does not appear to support it.') - add('--no-color', action='store_true', default=False, - help='Forces catkin to not use color in the output, regardless of the detect terminal type.') - - # Generate a list of verbs available - verbs = list_verbs() - - # Create the subparsers for each verb and collect the argument preprocessors - argument_preprocessors = create_subparsers(parser, verbs) - - # Get verb aliases - verb_aliases = get_verb_aliases() - - # Setup sysargs - sysargs = sys.argv[1:] if sysargs is None else sysargs - cmd = os.path.basename(sys.argv[0]) - - # Get colors config - no_color = False - force_color = False - for arg in sysargs: - if arg == '--no-color': - no_color = True - if arg == '--force-color': - force_color = True - - if no_color or not force_color and not is_tty(sys.stdout): - set_color(False) - - # Check for --test-colors - for arg in sysargs: - if arg == '--test-colors': - test_colors() - sys.exit(0) - if not arg.startswith('-'): - break - - # Check for --list-aliases - for arg in sysargs: - if arg == '--list-aliases' or arg == '-a': - for alias in 
sorted(list(verb_aliases.keys())): - print("{0}: {1}".format(alias, verb_aliases[alias])) - sys.exit(0) - if not arg.startswith('-'): - break - - # Do alias expansion - expanding_verb_aliases = True - used_aliases = [] - while expanding_verb_aliases: - expanding_verb_aliases = False - for index, arg in enumerate(sysargs): - if not arg.startswith('-'): - if arg in used_aliases: - print(fmt( - "@!@{gf}==>@| Expanding alias '@!@{yf}" + - arg + - "@|' was previously expanded, ignoring this time to prevent infinite recursion." - )) - if arg in verb_aliases: - before = [] if index == 0 else sysargs[:index - 1] - after = [] if index == len(sysargs) else sysargs[index + 1:] - sysargs = before + verb_aliases[arg].split() + after - print(fmt( - "@!@{gf}==>@| Expanding alias " - "'@!@{yf}{alias}@|' " - "from '@{yf}{before} @!{alias}@{boldoff}{after}@|' " - "to '@{yf}{before} @!{expansion}@{boldoff}{after}@|'" - ).format( - alias=arg, - expansion=verb_aliases[arg], - before=' '.join([cmd] + before), - after=(' '.join([''] + after) if after else '') - )) - expanding_verb_aliases = True - # Prevent the alias from being used again, to prevent infinite recursion - used_aliases.append(arg) - del verb_aliases[arg] - break - - # Determine the verb, splitting arguments into pre and post verb - verb = None - pre_verb_args = [] - post_verb_args = [] - for index, arg in enumerate(sysargs): - # If the arg does not start with a `-` then it is a positional argument - # The first positional argument must be the verb - if not arg.startswith('-'): - verb = arg - post_verb_args = sysargs[index + 1:] - break - # If the `-h` or `--help` option comes before the verb, parse_args - if arg in ['-h', '--help']: - parser.parse_args(sysargs) - # Otherwise it is a pre-verb option - pre_verb_args.append(arg) - - # Error on no verb provided - if verb is None: - print(parser.format_usage()) - sys.exit("Error: No verb provided.") - # Error on unknown verb provided - if verb not in verbs: - print(parser.format_usage()) - sys.exit("Error: Unknown verb '{0}' provided.".format(verb)) - - # First allow the verb's argument preprocessor to strip any args - # and return any "extra" information it wants as a dict - processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args) - # Then allow argparse to process the left over post-verb arguments along - # with the pre-verb arguments and the verb itself - args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args) - # Extend the argparse result with the extras from the preprocessor - for key, value in extras.items(): - setattr(args, key, value) - - # Finally call the subparser's main function with the processed args - # and the extras which the preprocessor may have returned - sys.exit(args.main(args) or 0) + catkin_main(sysargs) except KeyboardInterrupt: print('Interrupted by user!') From 01a6c05bfa7104310c56afaf23546eb2f04c706a Mon Sep 17 00:00:00 2001 From: William Woodall Date: Mon, 14 Dec 2015 23:09:19 -0800 Subject: [PATCH 08/11] add missing copyright --- catkin_tools/resultspace.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/catkin_tools/resultspace.py b/catkin_tools/resultspace.py index 0c0ba074..3a64f20e 100644 --- a/catkin_tools/resultspace.py +++ b/catkin_tools/resultspace.py @@ -1,3 +1,16 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re From 9a8f52730aafa5b59d01227b72ab3db477840542 Mon Sep 17 00:00:00 2001 From: William Woodall Date: Mon, 14 Dec 2015 23:11:16 -0800 Subject: [PATCH 09/11] fix missing symbol in catkin build job --- catkin_tools/verbs/catkin_build/job.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/catkin_tools/verbs/catkin_build/job.py b/catkin_tools/verbs/catkin_build/job.py index 56bd26ea..1fcb3b52 100644 --- a/catkin_tools/verbs/catkin_build/job.py +++ b/catkin_tools/verbs/catkin_build/job.py @@ -27,6 +27,12 @@ from .common import generate_env_file from .common import get_python_install_dir +# FileNotFoundError is only defined in Python3, but IOError can be used. +try: + FileNotFoundError +except NameError: + FileNotFoundError = IOError + MAKE_EXEC = which('make') CMAKE_EXEC = which('cmake') From 69e8a4026e6229432f1ab5c99841a8f975f27b5c Mon Sep 17 00:00:00 2001 From: William Woodall Date: Mon, 14 Dec 2015 23:14:15 -0800 Subject: [PATCH 10/11] [style] fixup unused imports and other things --- catkin_tools/context.py | 33 ++++++++++--------- catkin_tools/make_jobserver.py | 4 +-- catkin_tools/runner/run_windows.py | 2 +- catkin_tools/verbs/catkin_build/common.py | 6 ---- catkin_tools/verbs/catkin_create/__init__.py | 1 - catkin_tools/verbs/catkin_list/cli.py | 1 - tests/system/verbs/catkin_build/test_args.py | 14 +------- tests/system/verbs/catkin_build/test_build.py | 7 +--- .../system/verbs/catkin_build/test_bwlists.py | 18 ---------- .../system/verbs/catkin_build/test_context.py | 18 ---------- .../system/verbs/catkin_build/test_eclipse.py | 5 --- .../verbs/catkin_build/test_modify_ws.py | 18 ---------- .../verbs/catkin_build/test_unit_tests.py | 7 ---- .../catkin_build/test_whitespace_in_paths.py | 3 -- tests/system/workspace_factory.py | 10 +++++- tests/utils.py | 1 - tests/workspace_assertions.py | 1 - 17 files changed, 32 insertions(+), 117 deletions(-) diff --git a/catkin_tools/context.py b/catkin_tools/context.py index 7d7e8dbf..0e3f4b05 100644 --- a/catkin_tools/context.py +++ b/catkin_tools/context.py @@ -54,25 +54,28 @@ class Context(object): DEFAULT_DEVEL_SPACE = 'devel' DEFAULT_INSTALL_SPACE = 'install' - STORED_KEYS = ['extend_path', - 'source_space', - 'build_space', - 'devel_space', - 'install_space', - 'isolate_devel', - 'install', - 'isolate_install', - 'cmake_args', - 'make_args', - 'use_internal_make_jobserver', - 'catkin_make_args', - 'whitelist', - 'blacklist'] + STORED_KEYS = [ + 'extend_path', + 'source_space', + 'build_space', + 'devel_space', + 'install_space', + 'isolate_devel', + 'install', + 'isolate_install', + 'cmake_args', + 'make_args', + 'use_internal_make_jobserver', + 'catkin_make_args', + 'whitelist', + 'blacklist', + ] KEYS = STORED_KEYS + [ 'workspace', 'profile', - 'space_suffix'] + 'space_suffix', + ] @classmethod def load( diff --git a/catkin_tools/make_jobserver.py b/catkin_tools/make_jobserver.py index 711620bc..3ab82772 100644 --- a/catkin_tools/make_jobserver.py +++ b/catkin_tools/make_jobserver.py @@ -29,10 +29,10 @@ from catkin_tools.common import log from catkin_tools.common import version_tuple 
-JOBSERVER_SUPPORT_MAKEFILE = b''' +JOBSERVER_SUPPORT_MAKEFILE = b""" all: \techo $(MAKEFLAGS) | grep -- '--jobserver-fds' -''' +""" def memory_usage(): diff --git a/catkin_tools/runner/run_windows.py b/catkin_tools/runner/run_windows.py index 3d02de12..11e654b6 100644 --- a/catkin_tools/runner/run_windows.py +++ b/catkin_tools/runner/run_windows.py @@ -43,7 +43,7 @@ def run_command(cmd, cwd=None): left_over = lines[-1] try: yield data.decode() - except UnicodeDecodeError as exc: + except UnicodeDecodeError: yield unicode(data, errors='ignore') # Done yield p.returncode diff --git a/catkin_tools/verbs/catkin_build/common.py b/catkin_tools/verbs/catkin_build/common.py index 3774fd57..eef49d28 100644 --- a/catkin_tools/verbs/catkin_build/common.py +++ b/catkin_tools/verbs/catkin_build/common.py @@ -18,12 +18,6 @@ import stat import sys -from multiprocessing import cpu_count - -from catkin_tools.runner import run_command - -from .color import clr - # Due to portability issues, it uses only POSIX-compliant shell features. # This means that there is no support for BASH-like arrays, and special # care needs to be taken in order to preserve argument atomicity when diff --git a/catkin_tools/verbs/catkin_create/__init__.py b/catkin_tools/verbs/catkin_create/__init__.py index 66462774..cdca0f05 100644 --- a/catkin_tools/verbs/catkin_create/__init__.py +++ b/catkin_tools/verbs/catkin_create/__init__.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - from .cli import main from .cli import prepare_arguments diff --git a/catkin_tools/verbs/catkin_list/cli.py b/catkin_tools/verbs/catkin_list/cli.py index 60d2ef94..23e18242 100644 --- a/catkin_tools/verbs/catkin_list/cli.py +++ b/catkin_tools/verbs/catkin_list/cli.py @@ -20,7 +20,6 @@ from catkin_tools.context import Context - from catkin_pkg.packages import find_packages from catkin_pkg.package import InvalidPackage diff --git a/tests/system/verbs/catkin_build/test_args.py b/tests/system/verbs/catkin_build/test_args.py index 59eade05..f83b2a86 100644 --- a/tests/system/verbs/catkin_build/test_args.py +++ b/tests/system/verbs/catkin_build/test_args.py @@ -1,24 +1,12 @@ - from __future__ import print_function import os import shutil -from math import floor - from ...workspace_factory import workspace_factory -from ....utils import in_temporary_directory -from ....utils import assert_cmd_success -from ....utils import assert_cmd_failure -from ....utils import assert_files_exist from ....utils import catkin_success from ....utils import catkin_failure -from ....utils import redirected_stdio - - -from ....workspace_assertions import assert_workspace_initialized -from ....workspace_assertions import assert_no_warnings TEST_DIR = os.path.dirname(__file__) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') @@ -29,7 +17,7 @@ def test_cmake_args(): """Test passing CMake args to packages.""" - with workspace_factory() as wf: + with workspace_factory(): shutil.copytree(os.path.join(RESOURCES_DIR, 'catkin_pkgs', 'cmake_args'), 'src') # cmake_args package requires all three vars to be set diff --git a/tests/system/verbs/catkin_build/test_build.py b/tests/system/verbs/catkin_build/test_build.py index e763d192..759b0c57 100644 --- a/tests/system/verbs/catkin_build/test_build.py +++ b/tests/system/verbs/catkin_build/test_build.py @@ -1,17 +1,12 @@ - from __future__ import print_function import os -import shutil from math import floor from ...workspace_factory import 
workspace_factory from ....utils import in_temporary_directory -from ....utils import assert_cmd_success -from ....utils import assert_cmd_failure -from ....utils import assert_files_exist from ....utils import catkin_success from ....utils import catkin_failure from ....utils import redirected_stdio @@ -86,7 +81,7 @@ def test_build_dry_run(): with redirected_stdio() as (out, err): for build_type in BUILD_TYPES: with workspace_factory() as wf: - n_pkgs = create_tree_workspace(wf, build_type, 3) + create_tree_workspace(wf, build_type, 3) wf.build() assert catkin_success(BUILD + ['--dry-run']) assert not os.path.exists('build') diff --git a/tests/system/verbs/catkin_build/test_bwlists.py b/tests/system/verbs/catkin_build/test_bwlists.py index 8c9c4267..e47b9eca 100644 --- a/tests/system/verbs/catkin_build/test_bwlists.py +++ b/tests/system/verbs/catkin_build/test_bwlists.py @@ -1,24 +1,6 @@ - from __future__ import print_function import os -import shutil - -from math import floor - -from ...workspace_factory import workspace_factory - -from ....utils import in_temporary_directory -from ....utils import assert_cmd_success -from ....utils import assert_cmd_failure -from ....utils import assert_files_exist -from ....utils import catkin_success -from ....utils import catkin_failure -from ....utils import redirected_stdio - - -from ....workspace_assertions import assert_workspace_initialized -from ....workspace_assertions import assert_no_warnings TEST_DIR = os.path.dirname(__file__) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') diff --git a/tests/system/verbs/catkin_build/test_context.py b/tests/system/verbs/catkin_build/test_context.py index e8bc76f5..9ea75832 100644 --- a/tests/system/verbs/catkin_build/test_context.py +++ b/tests/system/verbs/catkin_build/test_context.py @@ -1,24 +1,6 @@ - from __future__ import print_function import os -import shutil - -from math import floor - -from ...workspace_factory import workspace_factory - -from ....utils import in_temporary_directory -from ....utils import assert_cmd_success -from ....utils import assert_cmd_failure -from ....utils import assert_files_exist -from ....utils import catkin_success -from ....utils import catkin_failure -from ....utils import redirected_stdio - - -from ....workspace_assertions import assert_workspace_initialized -from ....workspace_assertions import assert_no_warnings TEST_DIR = os.path.dirname(__file__) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') diff --git a/tests/system/verbs/catkin_build/test_eclipse.py b/tests/system/verbs/catkin_build/test_eclipse.py index a5729a17..c0f6a3e4 100644 --- a/tests/system/verbs/catkin_build/test_eclipse.py +++ b/tests/system/verbs/catkin_build/test_eclipse.py @@ -1,15 +1,10 @@ - from __future__ import print_function import os -import shutil from ....utils import in_temporary_directory -from ....utils import assert_cmd_success -from ....utils import assert_cmd_failure from ....utils import assert_files_exist from ....utils import catkin_success -from ....utils import catkin_failure from ....utils import redirected_stdio diff --git a/tests/system/verbs/catkin_build/test_modify_ws.py b/tests/system/verbs/catkin_build/test_modify_ws.py index f4cb5446..2b05034d 100644 --- a/tests/system/verbs/catkin_build/test_modify_ws.py +++ b/tests/system/verbs/catkin_build/test_modify_ws.py @@ -1,24 +1,6 @@ - from __future__ import print_function import os -import shutil - -from math import floor - -from ...workspace_factory import 
workspace_factory - -from ....utils import in_temporary_directory -from ....utils import assert_cmd_success -from ....utils import assert_cmd_failure -from ....utils import assert_files_exist -from ....utils import catkin_success -from ....utils import catkin_failure -from ....utils import redirected_stdio - - -from ....workspace_assertions import assert_workspace_initialized -from ....workspace_assertions import assert_no_warnings TEST_DIR = os.path.dirname(__file__) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') diff --git a/tests/system/verbs/catkin_build/test_unit_tests.py b/tests/system/verbs/catkin_build/test_unit_tests.py index 9d1c01a2..33e83784 100644 --- a/tests/system/verbs/catkin_build/test_unit_tests.py +++ b/tests/system/verbs/catkin_build/test_unit_tests.py @@ -1,4 +1,3 @@ - from __future__ import print_function import os @@ -7,15 +6,9 @@ from ....utils import in_temporary_directory from ....utils import assert_cmd_success from ....utils import assert_cmd_failure -from ....utils import assert_files_exist from ....utils import catkin_success -from ....utils import catkin_failure from ....utils import redirected_stdio - -from ....workspace_assertions import assert_workspace_initialized -from ....workspace_assertions import assert_no_warnings - TEST_DIR = os.path.dirname(__file__) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') diff --git a/tests/system/verbs/catkin_build/test_whitespace_in_paths.py b/tests/system/verbs/catkin_build/test_whitespace_in_paths.py index cecdfe6c..95f60ab0 100644 --- a/tests/system/verbs/catkin_build/test_whitespace_in_paths.py +++ b/tests/system/verbs/catkin_build/test_whitespace_in_paths.py @@ -1,9 +1,6 @@ import os -from catkin_tools.commands.catkin import main - from ....utils import catkin_success -from ....utils import catkin_failure from ...workspace_factory import workspace_factory diff --git a/tests/system/workspace_factory.py b/tests/system/workspace_factory.py index 9716568f..05d055a0 100644 --- a/tests/system/workspace_factory.py +++ b/tests/system/workspace_factory.py @@ -100,7 +100,15 @@ def add_package(self, pkg_name, package_path): """Copy a static package into the workspace""" shutil.copytree(package_path, self.source_space) - def create_package(self, pkg_name, build_type='cmake', depends=None, build_depends=None, run_depends=None, test_depends=None): + def create_package( + self, + pkg_name, + build_type='cmake', + depends=None, + build_depends=None, + run_depends=None, + test_depends=None + ): """Add a package to be generated in this workspace.""" self.packages[pkg_name] = self.Package(pkg_name, build_type, depends, build_depends, run_depends, test_depends) diff --git a/tests/utils.py b/tests/utils.py index 58e185fb..138cc5ab 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,4 +1,3 @@ - from __future__ import print_function import functools diff --git a/tests/workspace_assertions.py b/tests/workspace_assertions.py index bbe9f91f..2ae912fe 100644 --- a/tests/workspace_assertions.py +++ b/tests/workspace_assertions.py @@ -1,4 +1,3 @@ - from __future__ import print_function import re From aadf1399ed8ad8b81c7bc53ae07e10087324c60c Mon Sep 17 00:00:00 2001 From: Jonathan Bohren Date: Mon, 14 Dec 2015 20:57:50 -0500 Subject: [PATCH 11/11] execution: Adding new asyncio-based execution. 
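
The new executor schedules each job as a coroutine and gates its stages
on tokens from a shared job server. A rough sketch of the pattern (the
names below are simplified stand-ins, not the actual API added in
catkin_tools/execution/executor.py):

    import asyncio

    async def run_job(job, limiter):
        # Acquire a slot before running the job's stages sequentially,
        # so at most `max_jobs` jobs execute at once.
        async with limiter:
            for stage in job.stages:
                await stage.run()

    async def execute_jobs(jobs, max_jobs):
        limiter = asyncio.Semaphore(max_jobs)
        await asyncio.gather(*(run_job(j, limiter) for j in jobs))

On Python 2 the same pattern is provided via the trollius backport,
which is why the Travis scripts in this patch install it.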
--- .travis.before_install.bash | 17 + .travis.before_script.bash | 10 + .travis.yml | 35 +- catkin_tools/argument_parsing.py | 48 +- catkin_tools/common.py | 76 +- catkin_tools/execution/__init__.py | 0 catkin_tools/execution/controllers.py | 678 +++++++++++++++ catkin_tools/execution/events.py | 46 + catkin_tools/execution/executor.py | 337 ++++++++ catkin_tools/execution/io.py | 197 +++++ catkin_tools/execution/job_server.py | 433 ++++++++++ catkin_tools/execution/jobs.py | 56 ++ catkin_tools/execution/stages.py | 81 ++ catkin_tools/jobs/__init__.py | 0 catkin_tools/jobs/catkin.py | 138 +++ catkin_tools/jobs/cmake.py | 343 ++++++++ catkin_tools/jobs/commands/__init__.py | 0 catkin_tools/jobs/commands/cmake.py | 152 ++++ .../__init__.py => jobs/commands/make.py} | 9 +- catkin_tools/jobs/job.py | 247 ++++++ .../{verbs/catkin_build => jobs}/output.py | 0 catkin_tools/make_jobserver.py | 318 ------- catkin_tools/resultspace.py | 33 +- catkin_tools/runner/run_unix.py | 74 -- catkin_tools/runner/run_windows.py | 49 -- catkin_tools/verbs/catkin_build/.build.py.swn | Bin 0 -> 40960 bytes catkin_tools/verbs/catkin_build/build.py | 791 +++++++----------- catkin_tools/verbs/catkin_build/cli.py | 113 ++- catkin_tools/verbs/catkin_build/color.py | 31 - catkin_tools/verbs/catkin_build/common.py | 141 ---- catkin_tools/verbs/catkin_build/executor.py | 189 ----- catkin_tools/verbs/catkin_build/job.py | 340 -------- setup.py | 5 + 33 files changed, 3249 insertions(+), 1738 deletions(-) create mode 100755 .travis.before_install.bash create mode 100755 .travis.before_script.bash create mode 100644 catkin_tools/execution/__init__.py create mode 100644 catkin_tools/execution/controllers.py create mode 100644 catkin_tools/execution/events.py create mode 100644 catkin_tools/execution/executor.py create mode 100644 catkin_tools/execution/io.py create mode 100644 catkin_tools/execution/job_server.py create mode 100644 catkin_tools/execution/jobs.py create mode 100644 catkin_tools/execution/stages.py create mode 100644 catkin_tools/jobs/__init__.py create mode 100644 catkin_tools/jobs/catkin.py create mode 100644 catkin_tools/jobs/cmake.py create mode 100644 catkin_tools/jobs/commands/__init__.py create mode 100644 catkin_tools/jobs/commands/cmake.py rename catkin_tools/{runner/__init__.py => jobs/commands/make.py} (81%) create mode 100644 catkin_tools/jobs/job.py rename catkin_tools/{verbs/catkin_build => jobs}/output.py (100%) delete mode 100644 catkin_tools/make_jobserver.py delete mode 100644 catkin_tools/runner/run_unix.py delete mode 100644 catkin_tools/runner/run_windows.py create mode 100644 catkin_tools/verbs/catkin_build/.build.py.swn delete mode 100644 catkin_tools/verbs/catkin_build/common.py delete mode 100644 catkin_tools/verbs/catkin_build/executor.py delete mode 100644 catkin_tools/verbs/catkin_build/job.py diff --git a/.travis.before_install.bash b/.travis.before_install.bash new file mode 100755 index 00000000..f52303c4 --- /dev/null +++ b/.travis.before_install.bash @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +if [ "$TRAVIS_OS_NAME" == "linux" ]; then + echo "AOK" + #if [ "$PYTHON" == "/usr/bin/python3.4" ]; then + #sudo add-apt-repository ppa:fkrull/deadsnakes -y + #sudo apt-get update + #sudo apt-get install python3.4 python3-dev + #fi +elif [ "$TRAVIS_OS_NAME" == "osx" ]; then + if [ "$PYTHON" == "/usr/local/bin/python3" ]; then + brew install python3 + fi + sudo pip install virtualenv + virtualenv -p $PYTHON venv + source venv/bin/activate +fi diff --git a/.travis.before_script.bash 
b/.travis.before_script.bash new file mode 100755 index 00000000..b2ee50c5 --- /dev/null +++ b/.travis.before_script.bash @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +if [ "$TRAVIS_OS_NAME" == "linux" ]; then + sudo apt-get install cmake libgtest-dev build-essential python-setuptools python-pip + pip install trollius +elif [ "$TRAVIS_OS_NAME" == "osx" ]; then + # noop? + pip install setuptools + pip install trollius +fi diff --git a/.travis.yml b/.travis.yml index e992ce28..c172898b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,12 +1,35 @@ -language: python -python: - - "2.7" - - "3.3" +# Travis auto-virtualenv isn't supported on OS X +#language: python +#python: + #- "2.7" + #- "3.3" +language: generic +matrix: + include: + - python: 2.7 + language: python + python: "2.7" + os: linux + env: PYTHON=/usr/bin/python2.7 + - python: 3.4 + language: python + python: "3.4" + os: linux + env: PYTHON=/usr/bin/python3.4 + - python: 2 + os: osx + env: PYTHON=/usr/bin/python + - python: 3 + os: osx + env: PYTHON=/usr/local/bin/python3 +before_install: + - source .travis.before_install.bash install: - pip install argparse catkin-pkg distribute PyYAML psutil - - pip install nose coverage flake8 --upgrade + - pip install nose coverage flake8 mock --upgrade before_script: - - sudo apt-get install cmake python-setuptools libgtest-dev build-essential + - ./.travis.before_script.bash + - pip install git+https://github.com/osrf/osrf_pycommon.git - pip install empy --upgrade - git clone https://github.com/ros/catkin.git /tmp/catkin_source - mkdir /tmp/catkin_source/build diff --git a/catkin_tools/argument_parsing.py b/catkin_tools/argument_parsing.py index 119fdd42..814e1656 100644 --- a/catkin_tools/argument_parsing.py +++ b/catkin_tools/argument_parsing.py @@ -14,6 +14,7 @@ from __future__ import print_function +import argparse import os import re import sys @@ -22,9 +23,7 @@ from catkin_tools.common import wide_log -from catkin_tools.make_jobserver import initialize_jobserver -from catkin_tools.make_jobserver import jobserver_arguments -from catkin_tools.make_jobserver import jobserver_supported +import catkin_tools.execution.job_server as job_server def add_context_args(parser): @@ -60,10 +59,12 @@ def add_cmake_and_make_and_catkin_make_args(parser): """ add = parser.add_argument - add('-p', '--parallel-packages', '--parallel-jobs', '--parallel', dest='parallel_jobs', default=None, - help='Maximum number of packages which could be built in parallel (default is cpu count)') add('-j', '--jobs', default=None, - help='Limit parallel job count through the internal GNU make job server. default is cpu count') + help='Maximum number of build jobs to be distributed across active packages. 
(default is cpu count)') + add('-p', '--parallel-packages', metavar='PACKAGE_JOBS', dest='parallel_jobs', default=None, + help='Maximum number of packages allowed to be built in parallel (default is cpu count)') + # Deprecated flags kept for compatibility + add('--parallel-jobs', '--parallel', action='store_true', dest='parallel_jobs', help=argparse.SUPPRESS) add = parser.add_mutually_exclusive_group().add_argument add('--jobserver', dest='use_internal_make_jobserver', default=None, action='store_true', @@ -247,6 +248,10 @@ def handle_make_arguments( # Get the values for the jobs flags which may be in the make args jobs_dict = extract_jobs_flags_values(' '.join(make_args)) + jobs_args = extract_jobs_flags(' '.join(make_args)) + if len(jobs_args) > 0: + # Remove jobs flags from cli args if they're present + make_args = re.sub(' '.join(jobs_args), '', ' '.join(make_args)).split() if force_single_threaded_when_running_tests: # force single threaded execution when running test since rostest does not support multiple parallel runs @@ -255,8 +260,8 @@ def handle_make_arguments( wide_log('Forcing "-j1" for running unit tests.') jobs_dict['jobs'] = 1 - if len(jobs_dict) == 0: - make_args.extend(jobserver_arguments()) + if job_server.gnu_make_enabled(): + make_args.extend(job_server.gnu_make_args()) else: if 'jobs' in jobs_dict: make_args.append('-j{0}'.format(jobs_dict['jobs'])) @@ -282,7 +287,7 @@ def configure_make_args(make_args, use_internal_make_jobserver): n_cpus = cpu_count() jobs_flags = { 'jobs': n_cpus, - 'load-average': n_cpus} + 'load-average': n_cpus + 1} except NotImplementedError: # If the number of cores cannot be determined, limit to one job jobs_flags = { @@ -292,29 +297,31 @@ def configure_make_args(make_args, use_internal_make_jobserver): # Get MAKEFLAGS from environment makeflags_jobs_flags = extract_jobs_flags(os.environ.get('MAKEFLAGS', '')) using_makeflags_jobs_flags = len(makeflags_jobs_flags) > 0 - jobs_flags.update(extract_jobs_flags_values(' '.join(makeflags_jobs_flags))) + if using_makeflags_jobs_flags: + makeflags_jobs_flags_dict = extract_jobs_flags_values(' '.join(makeflags_jobs_flags)) + jobs_flags.update(makeflags_jobs_flags_dict) # Extract make jobs flags (these override MAKEFLAGS) cli_jobs_flags = extract_jobs_flags(' '.join(make_args)) using_cli_flags = len(cli_jobs_flags) > 0 - jobs_flags.update(extract_jobs_flags_values(' '.join(cli_jobs_flags))) if cli_jobs_flags: + jobs_flags.update(extract_jobs_flags_values(' '.join(cli_jobs_flags))) # Remove jobs flags from cli args if they're present make_args = re.sub(' '.join(cli_jobs_flags), '', ' '.join(make_args)).split() - # Instantiate a jobserver - if use_internal_make_jobserver: - initialize_jobserver( - num_jobs=jobs_flags.get('jobs', None), - max_load=jobs_flags.get('load-average', None)) + # Instantiate the jobserver + job_server.initialize( + max_jobs=jobs_flags.get('jobs', None), + max_load=jobs_flags.get('load-average', None), + gnu_make_enabled=use_internal_make_jobserver) # If the jobserver is supported - if jobserver_supported(): + if job_server.gnu_make_enabled(): jobs_args = [] else: jobs_args = cli_jobs_flags - return make_args + jobs_args, using_makeflags_jobs_flags, using_cli_flags, jobserver_supported() + return make_args + jobs_args, using_makeflags_jobs_flags, using_cli_flags, job_server.gnu_make_enabled() def argument_preprocessor(args): @@ -337,13 +344,14 @@ def argument_preprocessor(args): # Extract make jobs flags (these override MAKEFLAGS later on) jobs_args = extract_jobs_flags(' 
'.join(args))
 
-    if jobs_args:
+    if len(jobs_args) > 0:
         # Remove jobs flags from cli args if they're present
         args = re.sub(' '.join(jobs_args), '', ' '.join(args)).split()
 
     extras = {
         'cmake_args': cmake_args,
-        'make_args': (make_args or []) + (jobs_args or []),
+        'make_args': (make_args or []) + jobs_args,
         'catkin_make_args': catkin_make_args,
     }
+
     return args, extras
diff --git a/catkin_tools/common.py b/catkin_tools/common.py
index 95c2082d..458606f9 100644
--- a/catkin_tools/common.py
+++ b/catkin_tools/common.py
@@ -17,6 +17,7 @@
 import datetime
 import os
 import re
+import subprocess
 
 from catkin_pkg.packages import find_packages
 
@@ -31,23 +32,6 @@
     string_type = str
 
 
-class FakeLock(object):
-
-    """Fake lock used to mimic a Lock but without causing synchronization"""
-
-    def acquire(self, blocking=False):
-        return True
-
-    def release(self):
-        pass
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        pass
-
-
 def getcwd(symlinks=True):
     """Get the current working directory.
 
@@ -239,6 +223,38 @@
     return recursive_depends
 
 
+def get_recursive_build_dependants_in_workspace(package_name, ordered_packages):
+    """Calculates the recursive build dependants of a package which are also in
+    the ordered_packages
+
+    :param package_name: name of the package for which the recursive dependants should be calculated
+    :type package_name: str
+    :param ordered_packages: packages in the workspace, ordered topologically,
+        stored as a list of tuples of package path and package object
+    :type ordered_packages: list(tuple(package path,
+        :py:class:`catkin_pkg.package.Package`))
+    :returns: list of package path, package object tuples which are the
+        recursive build dependants for the given package
+    :rtype: list(tuple(package path, :py:class:`catkin_pkg.package.Package`))
+    """
+    workspace_packages_by_name = dict([(pkg.name, (pth, pkg)) for pth, pkg in ordered_packages])
+    packages_to_check = set([package_name])
+    recursive_dependants = list()
+
+    for pth, pkg in reversed(ordered_packages):
+        # Break if this is one to check
+        if pkg.name == package_name:
+            break
+
+        # Check if this package depends on the target package
+        deps = get_recursive_build_depends_in_workspace(pkg, ordered_packages)
+        deps_names = [p.name for _, p in deps]
+        if package_name in deps_names:
+            recursive_dependants.insert(0, (pth, pkg))
+
+    return recursive_dependants
+
+
 def is_tty(stream):
     """Returns True if the given stream is a tty, else False"""
     return hasattr(stream, 'isatty') and stream.isatty()
@@ -277,7 +293,7 @@ def terminal_width_windows():
 
 def terminal_width_linux():
     """Returns the estimated width of the terminal on linux"""
-    width = os.popen('tput cols', 'r').readline()
+    width = subprocess.Popen('tput cols', shell=True, stdout=subprocess.PIPE, close_fds=False).stdout.readline()
 
     return int(width)
 
@@ -330,6 +346,9 @@
     if not matches:
         # If no matches, then set the lookup_array to a plain range
         lookup_array = range(len(string))
+    lookup_array = list(lookup_array) + [len(string)]  # list() tolerates a range() on Python 3
+    if length > len(lookup_array):
+        return string
     return string[:lookup_array[length]] + clr('@|')
 
 
@@ -426,8 +445,12 @@ def wide_log(msg, **kwargs):
     :param truncate: If True, messages wider the then terminal will be truncated
     :type truncate: bool
     """
-    global wide_log_fn
-    wide_log_fn(msg, **kwargs)
+    try:
+        global wide_log_fn
+        wide_log_fn(msg, **kwargs)
+    except IOError:
+        # This happens when someone ctrl-c's during a 
log message
+        pass
 
 
 def find_enclosing_package(search_start_path=None, ws_path=None, warnings=None, symlinks=True):
@@ -454,3 +477,16 @@
 def version_tuple(v):
     """Get an integer version tuple from a string."""
     return tuple(map(int, (str(v).split("."))))
+
+
+def mkdir_p(path):
+    """Equivalent to UNIX mkdir -p"""
+    if os.path.exists(path):
+        return
+    try:
+        return os.makedirs(path)
+    except OSError:  # Treat a pre-existing directory as success, like `mkdir -p`
+        if os.path.isdir(path):
+            pass
+        else:
+            raise
diff --git a/catkin_tools/execution/__init__.py b/catkin_tools/execution/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/catkin_tools/execution/controllers.py b/catkin_tools/execution/controllers.py
new file mode 100644
index 00000000..2ffb734a
--- /dev/null
+++ b/catkin_tools/execution/controllers.py
@@ -0,0 +1,678 @@
+
+try:
+    # Python3
+    from queue import Empty
+except ImportError:
+    # Python2
+    from Queue import Empty
+
+import sys
+import threading
+import time
+
+from catkin_tools.common import disable_wide_log
+from catkin_tools.common import format_time_delta
+from catkin_tools.common import format_time_delta_short
+from catkin_tools.common import remove_ansi_escape
+from catkin_tools.common import terminal_width
+from catkin_tools.common import wide_log
+
+from catkin_tools.notifications import notify
+
+from catkin_tools.terminal_color import fmt
+from catkin_tools.terminal_color import sanitize
+from catkin_tools.terminal_color import ColorMapper
+
+from catkin_tools.execution import job_server
+
+# This map translates more human readable format strings into colorized versions
+_color_translation_map = {
+    # 'output': 'colorized_output'
+
+    '': fmt('@!' 
+ sanitize('') + '@|'), + + # Job starting + "Starting >>> {:<{}}": + fmt("Starting @!@{gf}>>>@| @!@{cf}{:<{}}@|"), + + # Job finishing + "Finished <<< {:<{}} [ {} ]": + fmt("@!@{kf}Finished@| @{gf}<<<@| @{cf}{:<{}}@| [ @{yf}{}@| ]"), + + "Failed <<< {:<{}} [ {} ]": + fmt("@!@{rf}Failed@| @{rf}<<<@| @{cf}{:<{}}@| [ @{yf}{}@| ]"), + + # Job abandoning + "Abandoned <<< {:<{}} [ {} ]": + fmt("@!@{rf}Abandoned@| @{rf}<<<@| @{cf}{:<{}}@| [ @{yf}{}@| ]"), + + "Depends on failed job {}": + fmt("@{yf}Depends on failed job @!{}@|"), + + "Depends on failed job {} via {}": + fmt("@{yf}Depends on failed job @!{}@| @{yf}via @!{}@|"), + + # Stage finishing + "Starting >> {}:{}": + fmt("Starting @{gf} >>@| @{cf}{}@|:@{bf}{}@|"), + + "Subprocess > {}:{} `cd {} && {}`": + fmt("Subprocess @!@{gf}>@| @{cf}{}@|:@{bf}{}@| @!@{kf}`cd {} && {}`@|"), + + "Finished << {}:{}": + fmt("@!@{kf}Finished@| @{gf} <<@| @{cf}{}@|:@{bf}{}@|"), + + "Failed << {}:{:<{}} [ Exited with code {} ]": + fmt("@!@{rf}Failed@| @{rf} <<@| @{cf}{}@|:@{bf}{:<{}}@|[ @{yf}Exited with code @!@{yf}{}@| ]"), + + "Output << {}:{} {}": + fmt("@!@{kf}Output@| @!@{kf} <<@| @{cf}{}@|:@{bf}{}@| @!@{kf}{}@|"), + + "Warnings << {}:{} {}": + fmt("@!@{yf}Warnings@| @{yf} <<@| @{cf}{}@|:@{bf}{}@| @!@{yf}{}@|"), + + "Errors << {}:{} {}": + fmt("@!@{rf}Errors@| @{rf} <<@| @{cf}{}@|:@{bf}{}@| @!@{rf}{}@|"), + + # Interleaved + "[{}:{}] ": + fmt("[@{cf}{}@|:@{bf}{}@|] "), + + # Status line + "[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]": + fmt("[@{pf}{}@| - @{yf}{}@|] [@!@{gf}{}@|/@{gf}{}@| complete] [@!@{gf}{}@|/@{gf}{}@| jobs] [@!@{kf}{}@| queued]"), + + "[{}:{} - {}]": + fmt("[@{cf}{}@|:@{bf}{}@| - @{yf}{}@|]"), + + "[{}:{} ({}%) - {}]": + fmt("[@{cf}{}@|:@{bf}{}@| @{bf}({}%)@| - @{yf}{}@|]"), + + # Summary + "[{}] Summary: All {} jobs completed successfully!": + fmt("[{}] @/@!Summary:@| @/All @!{}@| @/jobs completed successfully!@|"), + + "[{}] Summary: {} of {} jobs completed.": + fmt("[{}] @/@!@{yf}Summary:@| @/@!@{yf}{}@| @/@{yf}of @!@{yf}{}@| @/@{yf}jobs completed.@|"), + + "[{}] Warnings: No completed jobs produced warnings.": + fmt("[{}] @/@!@{kf}Warnings: None.@|"), + + "[{}] Warnings: {} completed jobs produced warnings.": + fmt("[{}] @/@!@{yf}Warnings:@| @/@!{}@| @/completed jobs produced warnings.@|"), + + "[{}] Skipped: None.": + fmt("[{}] @/@!@{kf}Skipped: None.@|"), + + "[{}] Skipped: {} jobs skipped.": + fmt("[{}] @/@!@{yf}Skipped:@| @/@!{}@| @/jobs skipped.@|"), + + "[{}] Failed: No jobs failed.": + fmt("[{}] @/@!@{kf}Failed: None.@|"), + + "[{}] Failed: {} jobs failed.": + fmt("[{}] @/@!@{rf}Failed:@| @/@!{}@| @/jobs failed.@|"), + + "[{}] Abandoned: No jobs were abandoned.": + fmt("[{}] @/@!@{kf}Abandoned: None.@|"), + + "[{}] Abandoned: {} jobs were abandoned.": + fmt("[{}] @/@!@{rf}Abandoned:@| @/@!{}@| @/jobs were abandoned.@|"), + + "[{}] - {}": + fmt("[{}] @{cf}{}@|"), + + "[{}] Runtime: {} total.": + fmt("[{}] @/@!Runtime:@| @/{} total.@|") +} + +color_mapper = ColorMapper(_color_translation_map) + +clr = color_mapper.clr + + +class ConsoleStatusController(threading.Thread): + + """Status thread for displaying events to the console. 
+
+
+    TODO: switch to interleaved output if only one job is running
+    """
+
+    def __init__(
+            self,
+            label,
+            job_labels,
+            jobs,
+            event_queue,
+            show_notifications=False,
+            show_stage_events=False,
+            show_buffered_stdout=False,
+            show_buffered_stderr=True,
+            show_live_stdout=False,
+            show_live_stderr=False,
+            show_compact_io=False,
+            show_active_status=True,
+            show_summary=True,
+            show_full_summary=False,
+            active_status_rate=10.0,
+            pre_start_time=None):
+        """
+        :param label: The label for this task (build, clean, etc)
+        :param job_labels: The labels to be used for the jobs (packages, tests, etc)
+        :param event_queue: The event queue used by an Executor
+        :param show_notifications: Show a libnotify notification when the jobs are finished
+        :param show_stage_events: Show events relating to stages in each job
+        :param show_buffered_stdout: Show stdout from jobs as they finish
+        :param show_buffered_stderr: Show stderr from jobs as they finish
+        :param show_live_stdout: Show stdout lines from jobs as they're generated
+        :param show_live_stderr: Show stderr lines from jobs as they're generated
+        :param show_compact_io: Don't print blank lines from redirected io
+        :param show_active_status: Periodically show a status line displaying the active jobs
+        :param show_summary: Show numbers of jobs that completed with errors and warnings
+        :param show_full_summary: Show lists of jobs in each termination category
+        :param active_status_rate: The rate in Hz at which the status line should be printed
+        :param pre_start_time: The actual start time to report, if preprocessing was done
+        """
+        super(ConsoleStatusController, self).__init__()
+
+        self.label = label
+        self.job_label = job_labels[0]
+        self.jobs_label = job_labels[1]
+        self.event_queue = event_queue
+
+        self.show_notifications = show_notifications
+        self.show_stage_events = show_stage_events
+        self.show_buffered_stdout = show_buffered_stdout
+        self.show_buffered_stderr = show_buffered_stderr
+        self.show_live_stdout = show_live_stdout
+        self.show_live_stderr = show_live_stderr
+        self.show_compact_io = show_compact_io
+        self.show_active_status = show_active_status
+        self.show_full_summary = show_full_summary
+        self.show_summary = show_summary
+        self.active_status_rate = active_status_rate
+        self.pre_start_time = pre_start_time
+
+        self.keep_running = True
+
+        # Map from jid -> job
+        self.jobs = dict([(j.jid, j) for j in jobs])
+
+        # Compute the max job id length when combined with stage labels
+        self.max_jid_length = 1 + \
+            max([len(jid) + max([len(s.label) for s in job.stages] or [0]) for jid, job in self.jobs.items()])
+
+    def run(self):
+        pending_jobs = []
+        queued_jobs = []
+        active_jobs = []
+        completed_jobs = {}
+        abandoned_jobs = []
+        failed_jobs = []
+        warned_jobs = []
+
+        cumulative_times = dict()
+        start_times = dict()
+        end_times = dict()
+        active_stages = dict()
+
+        start_time = self.pre_start_time or time.time()
+
+        # Disable the wide log padding if the status is disabled
+        if not self.show_active_status:
+            disable_wide_log()
+
+        while True:
+            # Check if we should stop
+            if not self.keep_running:
+                wide_log(clr('[{}] An internal error occurred!').format(self.label))
+                return
+            # Write a continuously-updated status line
+            if self.show_active_status:
+                # Try to get an event from the queue (non-blocking)
+                try:
+                    event = self.event_queue.get(False)
+                except Empty:
+                    # Print live status (overwrites last line)
+                    status_line = clr('[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]').format(
+                        self.label,
+                        
+                        format_time_delta_short(time.time() - start_time),
+                        len(completed_jobs),
+                        len(self.jobs),
+                        job_server.running_jobs(),
+                        job_server.max_jobs(),
+                        len(queued_jobs) + len(active_jobs) - len(active_stages)
+                    )
+
+                    # Show failed jobs
+                    if len(failed_jobs) > 0:
+                        status_line += clr(' [@!@{rf}{}@| @{rf}failed@|]').format(len(failed_jobs))
+
+                    # Check load / mem
+                    if not job_server.load_ok():
+                        status_line += clr(' [@!@{rf}High Load@|]')
+                    if not job_server.mem_ok():
+                        status_line += clr(' [@!@{rf}Low Memory@|]')
+
+                    # Add active jobs
+                    if len(active_jobs) == 0:
+                        status_line += clr(' @/@!@{kf}Waiting for jobs...@|')
+                    else:
+                        active_labels = []
+
+                        for j, (s, t, p) in active_stages.items():
+                            d = format_time_delta_short(cumulative_times[j] + time.time() - t)
+                            if p == '':
+                                active_labels.append(clr('[{}:{} - {}]').format(j, s, d))
+                            else:
+                                active_labels.append(clr('[{}:{} ({}%) - {}]').format(j, s, p, d))
+
+                        status_line += ' ' + ' '.join(active_labels)
+
+                    # Print the status line (overwriting the previous one)
+                    wide_log(status_line, rhs='', end='\r')
+                    sys.stdout.flush()
+                    if self.active_status_rate > 1E-5:
+                        time.sleep(1.0 / self.active_status_rate)
+                    continue
+            else:
+                # Try to get an event from the queue (blocking)
+                try:
+                    event = self.event_queue.get(True)
+                except Empty:
+                    break
+
+            # A `None` event is a signal to terminate
+            if event is None:
+                break
+
+            # Handle the received events
+            eid = event.event_id
+
+            if 'JOB_STATUS' == eid:
+                pending_jobs = event.data['pending']
+                queued_jobs = event.data['queued']
+                active_jobs = event.data['active']
+                completed_jobs = event.data['completed']
+                abandoned_jobs = event.data['abandoned']
+
+                # Check if all jobs have finished in some way
+                if all([len(event.data[t]) == 0 for t in ['pending', 'queued', 'active']]):
+                    break
+
+            elif 'STARTED_JOB' == eid:
+                cumulative_times[event.data['job_id']] = 0.0
+                wide_log(clr('Starting >>> {:<{}}').format(
+                    event.data['job_id'],
+                    self.max_jid_length))
+
+            elif 'FINISHED_JOB' == eid:
+                duration = format_time_delta(cumulative_times[event.data['job_id']])
+
+                if event.data['succeeded']:
+                    wide_log(clr('Finished <<< {:<{}} [ {} ]').format(
+                        event.data['job_id'],
+                        self.max_jid_length,
+                        duration))
+                else:
+                    failed_jobs.append(event.data['job_id'])
+                    wide_log(clr('Failed <<< {:<{}} [ {} ]').format(
+                        event.data['job_id'],
+                        self.max_jid_length,
+                        duration))
+
+            elif 'ABANDONED_JOB' == eid:
+                # Create a human-readable reason string
+                if 'DEP_FAILED' == event.data['reason']:
+                    direct = event.data['dep_job_id'] == event.data['direct_dep_job_id']
+                    if direct:
+                        reason = clr('Depends on failed job {}').format(event.data['dep_job_id'])
+                    else:
+                        reason = clr('Depends on failed job {} via {}').format(
+                            event.data['dep_job_id'],
+                            event.data['direct_dep_job_id'])
+                elif 'PEER_FAILED' == event.data['reason']:
+                    reason = clr('Unrelated job failed')
+                elif 'MISSING_DEPS' == event.data['reason']:
+                    reason = clr('Depends on unknown jobs: {}').format(
+                        ', '.join([clr('@!{}@|').format(jid) for jid in event.data['dep_ids']]))
+
+                wide_log(clr('Abandoned <<< {:<{}} [ {} ]').format(
+                    event.data['job_id'],
+                    self.max_jid_length,
+                    reason))
+
+            elif 'STARTED_STAGE' == eid:
+                active_stages[event.data['job_id']] = [event.data['stage_label'], event.time, '']
+                start_times[event.data['job_id']] = event.time
+
+                if self.show_stage_events:
+                    wide_log(clr('Starting >> {}:{}').format(
+                        event.data['job_id'],
+                        event.data['stage_label']))
+
+            elif 'STAGE_PROGRESS' == eid:
+                active_stages[event.data['job_id']][2] = event.data['percent']
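+                # Note: active_stages maps job_id -> [stage_label, start_time,
+                # percent] (see STARTED_STAGE above), so index 2 holds the
+                # stage's latest progress percentage.
+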
+            elif 'SUBPROCESS' == eid:
+                if self.show_stage_events:
+                    wide_log(clr('Subprocess > {}:{} `cd {} && {}`').format(
+                        event.data['job_id'],
+                        event.data['stage_label'],
+                        event.data['cwd'],
+                        ' '.join(event.data['cmd'])))
+
+            elif 'FINISHED_STAGE' == eid:
+                # Get the stage duration
+                duration = event.time - start_times[event.data['job_id']]
+                cumulative_times[event.data['job_id']] += duration
+
+                # This is no longer the active stage for this job
+                del active_stages[event.data['job_id']]
+
+                header_border = None
+                header_title = None
+                lines = []
+                footer_title = None
+                footer_border = None
+
+                # Generate headers / borders for output
+                if event.data['succeeded']:
+                    footer_title = clr(
+                        'Finished << {}:{}').format(
+                        event.data['job_id'],
+                        event.data['stage_label'])
+
+                    if len(event.data['stderr']) > 0:
+                        # Mark that this job warned about something
+                        if event.data['job_id'] not in warned_jobs:
+                            warned_jobs.append(event.data['job_id'])
+
+                        # Output contains warnings
+                        header_border = clr('@!@{yf}' + '_' * (terminal_width() - 1) + '@|')
+                        header_title = clr(
+                            'Warnings << {}:{} {}').format(
+                            event.data['job_id'],
+                            event.data['stage_label'],
+                            event.data['logfile_filename'])
+                        footer_border = clr('@{yf}' + '.' * (terminal_width() - 1) + '@|')
+                    else:
+                        # Normal output, no warnings
+                        header_title = clr(
+                            'Output << {}:{} {}').format(
+                            event.data['job_id'],
+                            event.data['stage_label'],
+                            event.data['logfile_filename'])
+
+                        # Don't print footer title
+                        if not self.show_stage_events:
+                            footer_title = None
+                else:
+                    # Output contains errors
+                    header_border = clr('@!@{rf}' + '_' * (terminal_width() - 1) + '@|')
+                    header_title = clr(
+                        'Errors << {}:{} {}').format(
+                        event.data['job_id'],
+                        event.data['stage_label'],
+                        event.data['logfile_filename'])
+                    footer_border = clr('@{rf}' + '.' * (terminal_width() - 1) + '@|')
+
+                    footer_title = clr(
+                        'Failed << {}:{:<{}} [ Exited with code {} ]').format(
+                        event.data['job_id'],
+                        event.data['stage_label'],
+                        max(0, self.max_jid_length - len(event.data['job_id'])),
+                        event.data['retcode'])
+
+                if self.show_buffered_stdout:
+                    if len(event.data['interleaved']) > 0:
+                        lines = [
+                            l for l in event.data['interleaved'].decode('utf-8').splitlines()
+                            if (self.show_compact_io is False or len(l.strip()) > 0)
+                        ]
+                    else:
+                        header_border = None
+                        header_title = None
+                        footer_border = None
+                elif self.show_buffered_stderr:
+                    if len(event.data['stderr']) > 0:
+                        lines = [
+                            l for l in event.data['stderr'].decode('utf-8').splitlines()
+                            if (self.show_compact_io is False or len(l.strip()) > 0)
+                        ]
+                    else:
+                        header_border = None
+                        header_title = None
+                        footer_border = None
+
+                # Print the output
+                if header_border:
+                    wide_log(header_border)
+                if header_title:
+                    wide_log(header_title)
+                if len(lines) > 0:
+                    wide_log('\n'.join(lines))
+                if footer_border:
+                    wide_log(footer_border)
+                if footer_title:
+                    wide_log(footer_title)
+
+            elif 'STDERR' == eid:
+                if self.show_live_stderr:
+                    prefix = clr('[{}:{}] ').format(
+                        event.data['job_id'],
+                        event.data['stage_label'])
+                    wide_log(''.join(prefix + l for l in event.data['data'].splitlines(True)))
+
+            elif 'STDOUT' == eid:
+                if self.show_live_stdout:
+                    prefix = clr('[{}:{}] ').format(
+                        event.data['job_id'],
+                        event.data['stage_label'])
+                    wide_log(''.join(prefix + l for l in event.data['data'].splitlines(True)))
+
+            elif 'MESSAGE' == eid:
+                wide_log(event.data['msg'])
+
+        if not self.show_summary:
+            return
+
+        # Print final runtime
+        wide_log(clr('[{}] Runtime: {} total.').format(
+            self.label,
+            format_time_delta(time.time() - start_time)))
+
+        # Print error summary
+        if len(completed_jobs) == len(self.jobs) and all(completed_jobs.values()) and len(failed_jobs) == 0:
+            if self.show_notifications:
+                notify("{} Succeeded".format(self.label.capitalize()),
+                       "{} {} completed with no warnings.".format(
+                           len(completed_jobs), self.jobs_label))
+
+            wide_log(clr('[{}] Summary: All {} jobs completed successfully!').format(self.label, len(self.jobs)))
+        else:
+            wide_log(clr('[{}] Summary: {} of {} jobs completed.').format(
+                self.label,
+                len([succeeded for jid, succeeded in completed_jobs.items() if succeeded]),
+                len(self.jobs)))
+
+        if len(warned_jobs) == 0:
+            wide_log(clr('[{}] Warnings: No completed jobs produced warnings.').format(
+                self.label))
+        else:
+            if self.show_notifications:
+                notify("{} Produced Warnings".format(self.label.capitalize()),
+                       "{} {} completed with warnings.".format(
+                           len(warned_jobs), self.jobs_label))
+
+            wide_log(clr('[{}] Warnings: {} completed jobs produced warnings.').format(
+                self.label,
+                len(warned_jobs)))
+            if self.show_full_summary:
+                for jid in warned_jobs:
+                    wide_log(clr('[{}] - {}').format(
+                        self.label,
+                        jid))
+
+        all_abandoned_jobs = [j for j in self.jobs if j not in completed_jobs]
+        if len(all_abandoned_jobs) == 0:
+            wide_log(clr('[{}] Abandoned: No jobs were abandoned.').format(
+                self.label))
+        else:
+            if self.show_notifications:
+                notify("{} Incomplete".format(self.label.capitalize()),
+                       "{} {} were abandoned.".format(
+                           len(all_abandoned_jobs), self.jobs_label))
+
+            wide_log(clr('[{}] Abandoned: {} jobs were abandoned.').format(
+                self.label,
+                len(all_abandoned_jobs)))
+            if self.show_full_summary:
+                for jid in all_abandoned_jobs:
+                    wide_log(clr('[{}] - {}').format(
+                        self.label,
+                        jid))
+
+        if len(failed_jobs) == 0:
+            wide_log(clr('[{}] Failed: No jobs failed.').format(
+                self.label))
+        else:
+            if self.show_notifications:
+                notify("{} Failed".format(self.label.capitalize()),
+                       "{} {} failed.".format(
+                           len(failed_jobs), self.jobs_label))
+
+            wide_log(clr('[{}] Failed: {} jobs failed.').format(
+                self.label,
+                len(failed_jobs)))
+            if self.show_full_summary:
+                for jid in failed_jobs:
+                    wide_log(clr('[{}] - {}').format(
+                        self.label,
+                        jid))
+
+
+def print_error_summary(verb, errors, no_notify, log_dir):
+    wide_log(clr("[" + verb + "] There were '" + str(len(errors)) + "' @!@{rf}errors@|:"))
+    if not no_notify:
+        notify("Build Failed", "there were {0} errors".format(len(errors)))
+    for error in errors:
+        if error.event_type == 'exit':
+            wide_log("""\
+Executor '{exec_id}' had an unhandled exception while processing package '{package}':
+
+{data[exc]}""".format(exec_id=error.executor_id + 1, **error.__dict__))
+        else:
+            wide_log(clr("""
+@{rf}Failed@| to build package '@{cf}{package}@|' because the following command:
+
+@!@{kf}# Command to reproduce:@|
+cd {location} && {cmd.cmd_str}; cd -
+
+@!@{kf}# Path to log:@|
+cat {log_dir}
+
+@{rf}Exited@| with return code: @!{retcode}@|""").format(package=error.package,
+                                                         log_dir=os.path.join(log_dir, error.package + '.log'),
+                                                         **error.data))
+
+
+def print_items_in_columns(items_in, number_of_columns):
+    number_of_items_in_line = 0
+    line_template = "{}" * number_of_columns
+    line_items = []
+    items = list(items_in)
+    while items:
+        line_items.append(items.pop(0))
+        number_of_items_in_line += 1
+        if number_of_items_in_line == number_of_columns:
+            wide_log(line_template.format(*line_items))
+            line_items = []
+            number_of_items_in_line = 0
+    if line_items:
+        wide_log(("{}" * len(line_items)).format(*line_items))
+
+
+def print_build_summary(context, packages_to_be_built, completed_packages, failed_packages):
+    # Calculate the longest package name
+    max_name_len = max([len(pkg.name) for _, pkg in context.packages])
+
+    def get_template(template_name, column_width):
+        templates = {
+            'successful': " @!@{gf}Successful@| @{cf}{package:<" + str(column_width) + "}@|",
+            'failed': " @!@{rf}Failed@| @{cf}{package:<" + str(column_width) + "}@|",
+            'not_built': " @!@{kf}Not built@| @{cf}{package:<" + str(column_width) + "}@|",
+        }
+        return templates[template_name]
+
+    # Setup templates for comparison
+    successful_template = get_template('successful', max_name_len)
+    failed_template = get_template('failed', max_name_len)
+    not_built_template = get_template('not_built', max_name_len)
+    # Calculate the maximum _printed_ length for each template
+    faux_package_name = ("x" * max_name_len)
+    templates = [
+        remove_ansi_escape(clr(successful_template).format(package=faux_package_name)),
+        remove_ansi_escape(clr(failed_template).format(package=faux_package_name)),
+        remove_ansi_escape(clr(not_built_template).format(package=faux_package_name)),
+    ]
+    # Calculate the longest column using the longest template
+    max_column_len = max([len(template) for template in templates])
+    # Calculate the number of columns (integer division, at least one column)
+    number_of_columns = (terminal_width() // max_column_len) or 1
+
+    successfuls = {}
+    faileds = {}
+    not_builts = {}
+    non_whitelisted = {}
+    blacklisted = {}
+
+    for (_, pkg) in context.packages:
+        if pkg.name in context.blacklist:
+            blacklisted[pkg.name] = clr(not_built_template).format(package=pkg.name)
+        elif len(context.whitelist) > 0 and pkg.name not in context.whitelist:
+            non_whitelisted[pkg.name] = clr(not_built_template).format(package=pkg.name)
+        elif pkg.name in completed_packages:
+            successfuls[pkg.name] = clr(successful_template).format(package=pkg.name)
+        else:
+            if pkg.name in failed_packages:
+                faileds[pkg.name] = clr(failed_template).format(package=pkg.name)
+            else:
+                not_builts[pkg.name] = clr(not_built_template).format(package=pkg.name)
+
+    # Combine successfuls and not_builts, sort by key, only take values
+    wide_log("")
+    wide_log("Build summary:")
+    combined = dict(successfuls)
+    combined.update(not_builts)
+    non_failed = [v for k, v in sorted(combined.items(), key=operator.itemgetter(0))]
+    print_items_in_columns(non_failed, number_of_columns)
+
+    # Print out non-whitelisted packages
+    if len(non_whitelisted) > 0:
+        wide_log("")
+        wide_log("Non-Whitelisted Packages:")
+        non_whitelisted_list = [v for k, v in sorted(non_whitelisted.items(), key=operator.itemgetter(0))]
+        print_items_in_columns(non_whitelisted_list, number_of_columns)
+
+    # Print out blacklisted packages
+    if len(blacklisted) > 0:
+        wide_log("")
+        wide_log("Blacklisted Packages:")
+        blacklisted_list = [v for k, v in sorted(blacklisted.items(), key=operator.itemgetter(0))]
+        print_items_in_columns(blacklisted_list, number_of_columns)
+
+    # Faileds only, sort by key, only take values
+    failed = [v for k, v in sorted(faileds.items(), key=operator.itemgetter(0))]
+    if len(failed) > 0:
+        wide_log("")
+        wide_log("Failed packages:")
+        print_items_in_columns(failed, number_of_columns)
+    else:
+        wide_log("")
+        wide_log("All packages built successfully.")
+
+    wide_log("")
+    wide_log(clr("@!@{gf}Successfully@| built '@!@{cf}{0}@|' packages, "
+                 "@!@{rf}failed@| to build '@!@{cf}{1}@|' packages, "
+                 "and @!@{kf}did not try to build@| '@!@{cf}{2}@|' packages.").format(
+        len(successfuls), len(faileds), len(not_builts)
+    ))
diff --git a/catkin_tools/execution/events.py b/catkin_tools/execution/events.py
new file mode 100644
index 00000000..4a293db5
--- /dev/null
+++ b/catkin_tools/execution/events.py
@@ -0,0 +1,46 @@
+
+import time
+
+
+class ExecutionEvent(object):
+
+    """Structure for events generated by the Executor.
+
+    Events can be jobs starting/finishing, commands starting/failing/finishing,
+    commands producing output (each line is an event), or when the executor
+    quits or fails.
+    """
+
+    # TODO: Make this a map of ID -> fields
+    EVENT_IDS = [
+        'JOB_STATUS',      # A report of running job states
+        'QUEUED_JOB',      # A job has been queued to be executed
+        'STARTED_JOB',     # A job has started to be executed
+        'FINISHED_JOB',    # A job has finished executing (succeeded or failed)
+        'ABANDONED_JOB',   # A job has been abandoned for some reason
+        'STARTED_STAGE',   # A job stage has started to be executed
+        'FINISHED_STAGE',  # A job stage has finished executing (succeeded or failed)
+        'STAGE_PROGRESS',  # A job stage has executed partially
+        'STDOUT',          # A status message from a job
+        'STDERR',          # A warning or error message from a job
+        'SUBPROCESS',      # A subprocess has been created
+        'MESSAGE',         # A generic message to be displayed to the user
+    ]
+
+    def __init__(self, event_id, **kwargs):
+        """Create a new event.
+
+        :param event_id: One of the valid EVENT_IDS
+        :param **kwargs: The additional data to be passed along with this event.
+        """
+        # Store the time this event was generated
+        self.time = time.time()
+
+        # Make sure the event ID is valid
+        if event_id not in ExecutionEvent.EVENT_IDS:
+            raise ValueError("The event ID %s is not a valid executor event."
+                             % event_id)
+
+        # Store the event data
+        self.event_id = event_id
+        self.data = kwargs
diff --git a/catkin_tools/execution/executor.py b/catkin_tools/execution/executor.py
new file mode 100644
index 00000000..1c96d7ab
--- /dev/null
+++ b/catkin_tools/execution/executor.py
@@ -0,0 +1,337 @@
+from __future__ import print_function
+
+import os
+import time
+import traceback
+
+from itertools import tee
+
+import trollius as asyncio
+
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import FIRST_COMPLETED
+
+from osrf_pycommon.process_utils import async_execute_process
+from osrf_pycommon.process_utils import get_loop
+
+from catkin_tools.execution import job_server
+
+from catkin_tools.jobs.job import get_env_loaders
+
+from .events import ExecutionEvent
+
+from .io import IOBufferLogger
+
+from .stages import CommandStage
+from .stages import FunctionStage
+
+
+def split(values, cond):
+    """Split an iterable based on a condition."""
+    head, tail = tee((cond(v), v) for v in values)
+    return [v for c, v in head if c], [v for c, v in tail if not c]
+
+
+@asyncio.coroutine
+def async_job(label, job, threadpool, event_queue, log_path):
+    """Run a sequence of Stages from a Job and collect their output.
+
+    :param label: The label for this task (build, clean, etc.)
+    :param job: A Job instance
+    :param threadpool: A thread pool executor for blocking stages
+    :param event_queue: A queue for asynchronous events
+    :param log_path: The path in which logfiles can be written
+    """
+
+    # Initialize success flag
+    all_stages_succeeded = True
+
+    # Jobs start by occupying a jobserver token
+    occupying_job = True
+
+    # Load environment for this job
+    job_env = job.getenv(os.environ)
+
+    # Execute each stage of this job
+    for stage in job.stages:
+        # Logger reference in this scope for error reporting
+        logger = None
+
+        # Abort the job if one of the stages has failed and the job is not
+        # supposed to continue on failure
+        if not job.continue_on_failure and not all_stages_succeeded:
+            break
+
+        # If the stage doesn't require a job token, release it temporarily
+        if stage.occupy_job:
+            if not occupying_job:
+                while job_server.try_acquire() is None:
+                    yield asyncio.From(asyncio.sleep(0.2))
+                occupying_job = True
+        else:
+            if occupying_job:
+                job_server.release()
+                occupying_job = False
+
+        # Notify stage started
+        event_queue.put(ExecutionEvent(
+            'STARTED_STAGE',
+            job_id=job.jid,
+            stage_label=stage.label))
+
+        if type(stage) is CommandStage:
+            try:
+                # Initiate the command
+                while True:
+                    try:
+                        stage.async_execute_process_kwargs['env'] = dict(
+                            stage.async_execute_process_kwargs['env'], **job_env)
+                        protocol_type = stage.logger_factory(label, job.jid, stage.label, event_queue, log_path)
+                        transport, logger = yield asyncio.From(
+                            async_execute_process(
+                                protocol_type,
+                                **stage.async_execute_process_kwargs))
+                        break
+                    except OSError as exc:
+                        if 'Text file busy' in str(exc):
+                            # This is a transient error, try again shortly
+                            # TODO: report the file causing the problem (exc.filename)
+                            time.sleep(0.01)
+                            continue
+                        raise
+
+                # Notify that a subprocess has been created
+                event_queue.put(ExecutionEvent(
+                    'SUBPROCESS',
+                    job_id=job.jid,
+                    stage_label=stage.label,
+                    **stage.async_execute_process_kwargs))
+
+                # Asynchronously yield until this command is completed
+                retcode = yield asyncio.From(logger.complete)
+            except:
+                logger = IOBufferLogger(label, job.jid, stage.label, event_queue, log_path)
+                logger.err(str(traceback.format_exc()))
+                retcode = 3
+
+        elif type(stage) is FunctionStage:
+            logger = IOBufferLogger(label, job.jid, stage.label, event_queue, log_path)
+            try:
+                # Asynchronously yield until this function is completed
+                retcode = yield asyncio.From(get_loop().run_in_executor(
+                    threadpool,
+                    stage.function,
+                    logger,
+                    event_queue))
+            except:
+                logger.err(str(traceback.format_exc()))
+                retcode = 3
+        else:
+            raise TypeError("Bad Job Stage: {}".format(stage))
+
+        # Set whether this stage succeeded
+        stage_succeeded = (retcode == 0)
+
+        # Update success tracker from this stage
+        all_stages_succeeded = all_stages_succeeded and stage_succeeded
+
+        # Close the logger
+        logger.close()
+
+        # Store the results from this stage
+        event_queue.put(ExecutionEvent(
+            'FINISHED_STAGE',
+            job_id=job.jid,
+            stage_label=stage.label,
+            succeeded=stage_succeeded,
+            stdout=logger.stdout_buffer,
+            stderr=logger.stderr_buffer,
+            interleaved=logger.interleaved_buffer,
+            logfile_filename=logger.unique_logfile_name,
+            retcode=retcode))
+
+    # Finally, return whether all stages of the job completed
+    raise asyncio.Return(job.jid, all_stages_succeeded)
+
+
+@asyncio.coroutine
+def execute_jobs(
+        label,
+        jobs,
+        event_queue,
+        log_path,
+        max_toplevel_jobs=None,
+        continue_on_failure=False,
+        continue_without_deps=False):
+    """Process a number of jobs asynchronously.
+
+    :param label: The label for this task (build, clean, etc.)
+    :param jobs: A list of topologically-sorted Jobs with no circular dependencies.
+    :param event_queue: A python queue for reporting events.
+    :param log_path: The path in which logfiles can be written
+    :param max_toplevel_jobs: Max number of top-level jobs
+    :param continue_on_failure: Keep running jobs even if one fails.
+    :param continue_without_deps: Run jobs even if their dependencies fail.
+    """
+
+    # Map of jid -> job
+    job_map = dict([(j.jid, j) for j in jobs])
+    # Jobs which are not ready to be executed
+    pending_jobs = []
+    # Jobs which are ready to be executed once workers are available
+    queued_jobs = []
+    # List of active jobs
+    active_jobs = []
+    # Set of active job futures
+    active_job_fs = set()
+    # Dict of completed jobs job_id -> succeeded
+    completed_jobs = {}
+    # List of jobs whose deps failed
+    abandoned_jobs = []
+
+    # Create a thread pool executor for blocking python stages in the asynchronous jobs
+    threadpool = ThreadPoolExecutor(max_workers=job_server.max_jobs())
+
+    # Immediately abandon jobs with bad dependencies
+    pending_jobs, new_abandoned_jobs = split(jobs, lambda j: all([d in job_map for d in j.deps]))
+
+    for abandoned_job in new_abandoned_jobs:
+        abandoned_jobs.append(abandoned_job)
+        event_queue.put(ExecutionEvent(
+            'ABANDONED_JOB',
+            job_id=abandoned_job.jid,
+            reason='MISSING_DEPS',
+            dep_ids=[d for d in abandoned_job.deps if d not in job_map]))
+
+    # Initialize the queued jobs (those with no unresolved dependencies) and pending jobs
+    queued_jobs, pending_jobs = split(pending_jobs, lambda j: len(j.deps) == 0)
+
+    # Process all jobs asynchronously until there are none left
+    while len(active_job_fs) + len(queued_jobs) + len(pending_jobs) > 0:
+
+        # Activate jobs while the jobserver dispenses tokens
+        while ((len(queued_jobs) > 0) and
+               ((max_toplevel_jobs is None) or (len(active_jobs) < max_toplevel_jobs)) and
+               (job_server.try_acquire() is not None)):
+
+            # Pop a job off of the job queue
+            job = queued_jobs.pop(0)
+
+            # Label it (for debugging)
+            job_server.add_label(job.jid)
+
+            # Notify that the job is being started
+            event_queue.put(ExecutionEvent(
+                'STARTED_JOB',
+                job_id=job.jid))
+
+            # Start the job coroutine
+            active_jobs.append(job)
+            active_job_fs.add(async_job(label, job, threadpool, event_queue, log_path))
+
+        # Report running jobs
+        event_queue.put(ExecutionEvent(
+            'JOB_STATUS',
+            pending=[j.jid for j in pending_jobs],
+            queued=[j.jid for j in queued_jobs],
+            active=[j.jid for j in active_jobs],
+            abandoned=[j.jid for j in abandoned_jobs],
+            completed=completed_jobs
+        ))
+
+        # Process jobs as they complete asynchronously
+        done_job_fs, active_job_fs = yield asyncio.From(asyncio.wait(
+            active_job_fs,
+            timeout=0.10,
+            return_when=FIRST_COMPLETED))
+
+        for done_job_f in done_job_fs:
+            # Capture a result once the job has finished
+            job_id, succeeded = yield asyncio.From(done_job_f)
+
+            # Release a jobserver token now that this job has finished
+            job_server.release(job_id)
+            active_jobs = [j for j in active_jobs if j.jid != job_id]
+
+            # Generate event with the results of this job
+            event_queue.put(ExecutionEvent(
+                'FINISHED_JOB',
+                job_id=job_id,
+                succeeded=succeeded))
+
+            # Add the job to the completed list
+            completed_jobs[job_id] = succeeded
+
+            # Handle failure modes
+            if not succeeded:
+                # Handle different abandoning policies
+                if not continue_on_failure:
+                    # Abort all pending jobs if any job fails
+                    new_abandoned_jobs = queued_jobs + pending_jobs
+                    queued_jobs = []
+                    pending_jobs = []
+
+                    # Notify that jobs have been abandoned
+                    for abandoned_job in new_abandoned_jobs:
+                        abandoned_jobs.append(abandoned_job)
+                        event_queue.put(ExecutionEvent(
+                            'ABANDONED_JOB',
+                            job_id=abandoned_job.jid,
+                            reason='PEER_FAILED',
+                            peer_job_id=job_id))
+
+                elif not continue_without_deps:
+                    unhandled_abandoned_job_ids = [job_id]
+
+                    # Abandon jobs which depend on abandoned jobs
+                    while len(unhandled_abandoned_job_ids) > 0:
+                        # Get the abandoned job
+                        abandoned_job_id = unhandled_abandoned_job_ids.pop(0)
+
+                        # Abandon all pending jobs which depend on this job_id
+                        unhandled_abandoned_jobs, pending_jobs = split(
+                            pending_jobs,
+                            lambda j: abandoned_job_id in j.deps)
+
+                        # Handle each new abandoned job
+                        for abandoned_job in unhandled_abandoned_jobs:
+                            abandoned_jobs.append(abandoned_job)
+                            # Notify if any jobs have been abandoned
+                            event_queue.put(ExecutionEvent(
+                                'ABANDONED_JOB',
+                                job_id=abandoned_job.jid,
+                                reason='DEP_FAILED',
+                                direct_dep_job_id=abandoned_job_id,
+                                dep_job_id=job_id))
+
+                        # Add additional job ids to check
+                        unhandled_abandoned_job_ids.extend(
+                            [j.jid for j in unhandled_abandoned_jobs])
+
+            # Update the list of ready jobs (based on completed job dependencies)
+            new_queued_jobs, pending_jobs = split(
+                pending_jobs,
+                lambda j: j.all_deps_completed(completed_jobs))
+            queued_jobs.extend(new_queued_jobs)
+
+            # Notify of newly queued jobs
+            for queued_job in new_queued_jobs:
+                event_queue.put(ExecutionEvent(
+                    'QUEUED_JOB',
+                    job_id=queued_job.jid))
+
+        # Report running jobs
+        event_queue.put(ExecutionEvent(
+            'JOB_STATUS',
+            pending=[j.jid for j in pending_jobs],
+            queued=[j.jid for j in queued_jobs],
+            active=[j.jid for j in active_jobs],
+            abandoned=[j.jid for j in abandoned_jobs],
+            completed=completed_jobs
+        ))
+
+    raise asyncio.Return(all(completed_jobs.values()))
+
+
+def run_until_complete(coroutine):
+    # Get event loop
+    loop = get_loop()
+
+    # Run jobs
+    return loop.run_until_complete(coroutine)
diff --git a/catkin_tools/execution/io.py b/catkin_tools/execution/io.py
new file mode 100644
index 00000000..e9b9aab9
--- /dev/null
+++ b/catkin_tools/execution/io.py
@@ -0,0 +1,197 @@
+
+import os
+import re
+import shutil
+
+from glob import glob
+
+from osrf_pycommon.process_utils import AsyncSubprocessProtocol
+
+from catkin_tools.common import mkdir_p
+
+from .events import ExecutionEvent
+
+
+MAX_LOGFILE_HISTORY = 10
+
+
+class IOBufferContainer(object):
+
+    """A simple buffer container for use in logging.
+
+    This class will open a logfile for a given job stage and write to it
+    continuously while receiving stdout and stderr.
+    """
+
+    def __init__(self, label, job_id, stage_label, event_queue, log_path):
+        self.label = label
+        self.job_id = job_id
+        self.stage_label = stage_label
+        self.event_queue = event_queue
+        self.log_path = log_path
+
+        self.is_open = False
+        self.stdout_buffer = b""
+        self.stderr_buffer = b""
+        self.interleaved_buffer = b""
+
+        # Construct the logfile path for this job and stage
+        logfile_dir_path = os.path.join(log_path, self.job_id)
+        self.logfile_basename = os.path.join(logfile_dir_path, '.'.join([self.label, self.stage_label]))
+        self.logfile_name = '{}.log'.format(self.logfile_basename)
+
+        # Create the logfile dir if it doesn't exist
+        if not os.path.exists(logfile_dir_path):
+            mkdir_p(logfile_dir_path)
+
+        # Get the existing number of logfiles
+        # TODO: Make this number global across all build stages
+        existing_logfile_indices = sorted([int(lf.split('.')[-2])
+                                           for lf in glob('{}.*.log'.format(self.logfile_basename))])
+        if len(existing_logfile_indices) == 0:
+            self.logfile_index = 0
+        else:
+            self.logfile_index = 1 + existing_logfile_indices[-1]
+
+        # Generate the logfile name
+        self.unique_logfile_name = '{}.{:0>{}}.log'.format(self.logfile_basename, self.logfile_index, 3)
+
+        # Remove colliding file if necessary
+        if os.path.exists(self.logfile_name):
+            os.unlink(self.logfile_name)
+
+        # Open logfile
+        self.log_file = open(self.logfile_name, 'wb')
+        self.is_open = True
+
+    def close(self):
+        # Close logfile
+        self.log_file.close()
+        self.is_open = False
+
+        # Copy logfile to unique name
+        shutil.copy(self.logfile_name, self.unique_logfile_name)
+
+        # Remove older logfiles
+        for logfile_name in glob('{}.*.log'.format(self.logfile_basename)):
+            if (self.logfile_index - int(logfile_name.split('.')[-2])) >= MAX_LOGFILE_HISTORY:
+                os.unlink(logfile_name)
+
+        # Save output from stderr (these don't get deleted until cleaning the logfile directory)
+        if len(self.stderr_buffer) > 0:
+            with open(self.unique_logfile_name + '.stderr', 'wb') as logfile:
+                logfile.write(self.stderr_buffer)
+
+    def __del__(self):
+        if self.is_open:
+            self.close()
+
+    @classmethod
+    def factory(cls, label, job_id, stage_label, event_queue, log_path):
+        """Factory method for constructing with job metadata."""
+
+        def init_proxy(*args, **kwargs):
+            return cls(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
+
+        return init_proxy
+
+
+class IOBufferLogger(IOBufferContainer):
+
+    """This is a logging class to be used instead of sys.stdout and sys.stderr
+    in FunctionStage operations.
+
+    This class also generates `stdout` and `stderr` events.
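+
+    For example, a FunctionStage body might write to it like this
+    (illustrative sketch):
+
+        logger.out('Generating setup file')
+        logger.err('Warning: could not find an existing setup file')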
+ """ + + def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs): + IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path) + + def out(self, data): + """ + :type data: str + """ + encoded_data = data.encode('utf-8') + self.stdout_buffer += encoded_data + self.interleaved_buffer += encoded_data + + self.event_queue.put(ExecutionEvent( + 'STDOUT', + job_id=self.job_id, + stage_label=self.stage_label, + data=data)) + + self.log_file.write(encoded_data) + + def err(self, data): + """ + :type data: str + """ + encoded_data = data.encode('utf-8') + self.stderr_buffer += encoded_data + self.interleaved_buffer += encoded_data + + self.event_queue.put(ExecutionEvent( + 'STDERR', + job_id=self.job_id, + stage_label=self.stage_label, + data=data)) + + self.log_file.write(encoded_data) + + +class IOBufferProtocol(IOBufferContainer, AsyncSubprocessProtocol): + + """An asyncio protocol that collects stdout and stderr. + + This class also generates `stdout` and `stderr` events. + + Since the underlying asyncio API constructs the actual protocols, this + class provides a factory method to inject the job and stage information + into the created protocol. + """ + + def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs): + IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path) + AsyncSubprocessProtocol.__init__(self, *args, **kwargs) + + def on_stdout_received(self, data): + """ + :type data: utf-8 encoded bytes + """ + self.stdout_buffer += data + self.interleaved_buffer += data + self.log_file.write(data) + + decoded_data = data.decode('utf-8') + + # TODO: This is CMake-specific, it should be defined in the CMake job + progress_matches = re.match('\[\s*([0-9]+)%\]', decoded_data) + if progress_matches is not None: + self.event_queue.put(ExecutionEvent( + 'STAGE_PROGRESS', + job_id=self.job_id, + stage_label=self.stage_label, + percent=str(progress_matches.groups()[0]))) + + self.event_queue.put(ExecutionEvent( + 'STDOUT', + job_id=self.job_id, + stage_label=self.stage_label, + data=decoded_data)) + + def on_stderr_received(self, data): + """ + :type data: utf-8 encoded bytes + """ + self.stderr_buffer += data + self.interleaved_buffer += data + self.log_file.write(data) + + decoded_data = data.decode('utf-8') + + self.event_queue.put(ExecutionEvent( + 'STDERR', + job_id=self.job_id, + stage_label=self.stage_label, + data=decoded_data)) diff --git a/catkin_tools/execution/job_server.py b/catkin_tools/execution/job_server.py new file mode 100644 index 00000000..cb75058e --- /dev/null +++ b/catkin_tools/execution/job_server.py @@ -0,0 +1,433 @@ + +from __future__ import print_function + +from multiprocessing import cpu_count +from tempfile import mkstemp +from termios import FIONREAD + +import array +import errno +import fcntl +import os +import re +import subprocess +import time + +from catkin_tools.common import log +from catkin_tools.common import version_tuple + +from catkin_tools.terminal_color import ColorMapper + + +mapper = ColorMapper() +clr = mapper.clr + + +def memory_usage(): + """ + Get used and total memory usage. 
+ + :returns: Used and total memory in bytes + :rtype: tuple + """ + + # Handle optional psutil support + try: + import psutil + + psutil_version = version_tuple(psutil.__version__) + if psutil_version < (0, 6, 0): + usage = psutil.phymem_usage() + used = usage.used + else: + usage = psutil.virtual_memory() + used = usage.total - usage.available + + return used, usage.total + + except ImportError: + pass + + return None, None + + +JOBSERVER_SUPPORT_MAKEFILE = b''' +all: +\techo $(MAKEFLAGS) | grep -- '--jobserver-fds' +''' + + +def test_gnu_make_support(): + """ + Test if the system 'make' supports the job server implementation. + + This simply checks if the `--jobserver-fds` option is supported by the + `make` command. It does not tests if the jobserver is actually working + properly. + """ + + fd, makefile = mkstemp() + os.write(fd, JOBSERVER_SUPPORT_MAKEFILE) + os.close(fd) + + ret = subprocess.call(['make', '-f', makefile, '-j2'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + os.unlink(makefile) + return (ret == 0) + + +class JobServer(object): + # Whether the job server has been initialized + _initialized = False + + # Flag designating whether the `make` program supports the GNU Make + # jobserver interface + _gnu_make_supported = None + + # Initialize variables + _load_ok = True + _mem_ok = True + _internal_jobs = [] + _max_load = 0 + _max_jobs = 0 + _job_pipe = os.pipe() + + # Setting fd inheritance is required in Python > 3.4 + # This is set by default in Python 2.7 + # For more info see: https://docs.python.org/3.4/library/os.html#fd-inheritance + if hasattr(os, 'set_inheritable'): + for fd in _job_pipe: + os.set_inheritable(fd, True) + if not os.get_inheritable(fd): + log(clr('@{yf}@!Warning: jobserver file descriptors are not inheritable.@|')) + + @classmethod + def _set_max_jobs(cls, max_jobs): + """Set the maximum number of jobs to be used with the jobserver. + + This will wait for all active jobs to be completed, then re-initialize the job pipe. + """ + + # Read all possible tokens from the pipe + try: + tokens = os.read(cls._job_pipe[0], cls._max_jobs) + except OSError as e: + if e.errno != errno.EINTR: + raise + + # Update max jobs + cls._max_jobs = max_jobs + + # Initialize the pipe with max_jobs tokens + for i in range(cls._max_jobs): + os.write(cls._job_pipe[1], b'+') + + @classmethod + def _set_max_mem(cls, max_mem): + """ + Set the maximum memory to keep instantiating jobs. + + :param max_mem: String describing the maximum memory that can be used + on the system. It can either describe memory percentage or absolute + amount. Use 'P%' for percentage or 'N' for absolute value in bytes, + 'Nk' for kilobytes, 'Nm' for megabytes, and 'Ng' for gigabytes. 
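+            For example, '50%' limits memory use to half of the physical
+            memory, and '4g' limits it to four gigabytes.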
+        :type max_mem: str
+        """
+
+        if max_mem is None:
+            cls._max_mem = None
+            return
+        elif type(max_mem) is float or type(max_mem) is int:
+            mem_percent = max_mem
+        elif type(max_mem) is str:
+            m_percent = re.search(r'([0-9]+)\%', max_mem)
+            m_abs = re.search(r'([0-9]+)([kKmMgG]{0,1})', max_mem)
+
+            if m_percent is None and m_abs is None:
+                cls._max_mem = None
+                return
+
+            if m_percent:
+                mem_percent = m_percent.group(1)
+            elif m_abs:
+                val = float(m_abs.group(1))
+                mag_symbol = m_abs.group(2)
+
+                _, total_mem = memory_usage()
+
+                if mag_symbol == '':
+                    mag = 1.0
+                elif mag_symbol.lower() == 'k':
+                    mag = 1024.0
+                elif mag_symbol.lower() == 'm':
+                    mag = pow(1024.0, 2)
+                elif mag_symbol.lower() == 'g':
+                    mag = pow(1024.0, 3)
+
+                mem_percent = 100.0 * val * mag / total_mem
+
+        cls._max_mem = max(0.0, min(100.0, float(mem_percent)))
+
+    @classmethod
+    def _check_load(cls):
+        if cls._max_load is not None:
+            try:
+                load = os.getloadavg()
+                if load[1] < cls._max_load:
+                    cls._load_ok = True
+                else:
+                    cls._load_ok = False
+            except NotImplementedError:
+                cls._load_ok = True
+
+        return cls._load_ok
+
+    @classmethod
+    def _check_mem(cls):
+        if cls._max_mem is not None:
+            mem_used, mem_total = memory_usage()
+            mem_percent_used = 100.0 * float(mem_used) / float(mem_total)
+            if mem_percent_used > cls._max_mem:
+                cls._mem_ok = False
+            else:
+                cls._mem_ok = True
+
+        return cls._mem_ok
+
+    @classmethod
+    def _check_conditions(cls):
+        return (cls._check_load() and cls._check_mem()) or cls._running_jobs() == 0
+
+    @classmethod
+    def _acquire(cls):
+        """
+        Obtain a job server token. Be sure to call _release() to avoid
+        deadlocks.
+        """
+        try:
+            # Read a token from the job pipe
+            token = os.read(cls._job_pipe[0], 1)
+            return token
+        except OSError as e:
+            if e.errno != errno.EINTR:
+                raise
+
+        return None
+
+    @classmethod
+    def _release(cls):
+        """
+        Write a token to the job pipe.
+        """
+        os.write(cls._job_pipe[1], b'+')
+
+    @classmethod
+    def _running_jobs(cls):
+        try:
+            buf = array.array('i', [0])
+            if fcntl.ioctl(cls._job_pipe[0], FIONREAD, buf) == 0:
+                return cls._max_jobs - buf[0]
+        except NotImplementedError:
+            pass
+        except OSError:
+            pass
+
+        return cls._max_jobs
+
+
+def initialize(max_jobs=None, max_load=None, max_mem=None, gnu_make_enabled=False):
+    """
+    Initialize the global GNU Make jobserver.
+
+    :param max_jobs: the maximum number of jobs available
+    :param max_load: do not dispatch additional jobs if this system load
+        value is exceeded
+    :param max_mem: do not dispatch additional jobs if system physical
+        memory usage exceeds this value (see _set_max_mem for additional
+        documentation)
+    :param gnu_make_enabled: whether to expose the jobserver to child GNU
+        Make processes
+    """
+
+    # Check initialization
+    if JobServer._initialized is True:
+        return
+
+    # Check if the jobserver is supported
+    if JobServer._gnu_make_supported is None:
+        JobServer._gnu_make_supported = test_gnu_make_support()
+
+    if not JobServer._gnu_make_supported:
+        log(clr('@!@{yf}WARNING:@| Make job server not supported. The number of Make '
+                'jobs may exceed the number of CPU cores.@|'))
+
+    # Set gnu make compatibility enabled
+    JobServer._gnu_make_enabled = gnu_make_enabled
+
+    # Set the maximum number of jobs
+    if max_jobs is None:
+        try:
+            max_jobs = cpu_count()
+        except NotImplementedError:
+            log('@{yf}WARNING: Failed to determine the cpu_count, falling back to 1 job as the default.@|')
+            max_jobs = 1
+    else:
+        max_jobs = int(max_jobs)
+
+    JobServer._set_max_jobs(max_jobs)
+    JobServer._max_load = max_load
+    JobServer._set_max_mem(max_mem)
+
+    JobServer._initialized = True
+
+
+def load_ok():
+    return JobServer._load_ok
+
+
+def mem_ok():
+    return JobServer._mem_ok
+
+
+def set_max_mem(max_mem):
+    """
+    Set the maximum memory to keep instantiating jobs.
+
+    :param max_mem: String describing the maximum memory that can be used on
+        the system. It can either describe memory percentage or absolute amount.
+        Use 'P%' for percentage or 'N' for absolute value in bytes, 'Nk' for
+        kilobytes, 'Nm' for megabytes, and 'Ng' for gigabytes.
+    :type max_mem: str
+    """
+
+    JobServer._set_max_mem(max_mem)
+
+
+def wait_acquire():
+    """
+    Block until a job server token is acquired, then return it.
+    """
+
+    token = None
+
+    while token is None:
+        # Make sure we're observing load and memory maximums
+        if not JobServer._check_conditions():
+            time.sleep(0.01)
+            continue
+
+        # Try to get a job token
+        token = JobServer._acquire()
+
+    return token
+
+
+def acquire():
+    """
+    Try to acquire a job server token once, returning None if the load or
+    memory conditions are not met or no token is available.
+    """
+
+    token = None
+
+    # Make sure we're observing load and memory maximums
+    if JobServer._check_conditions():
+        # Try to get a job token
+        token = JobServer._acquire()
+
+    return token
+
+
+def add_label(label):
+    JobServer._internal_jobs.append(label)
+
+
+def del_label(label):
+    JobServer._internal_jobs.remove(label)
+
+
+def try_acquire_gen():
+    """
+    Yield None until a job server token is acquired, then yield it.
+    """
+    while True:
+        # Make sure we're observing load and memory maximums
+        if JobServer._check_conditions() and running_jobs() < max_jobs():
+            # Try to get a job token
+            token = JobServer._acquire()
+            yield token
+        else:
+            yield None
+
+
+def try_acquire():
+    """
+    Try to acquire a job token, return None if not available.
+    """
+    # Make sure we're observing load and memory maximums
+    if JobServer._check_conditions() and running_jobs() < max_jobs():
+        # Try to get a job token
+        token = JobServer._acquire()
+        return token
+
+    return None
+
+
+def release(label=None):
+    """
+    Release a job server token.
+    """
+    JobServer._release()
+    if label is not None:
+        del_label(label)
+
+
+def gnu_make_enabled():
+    return JobServer._gnu_make_supported and JobServer._gnu_make_enabled
+
+
+def gnu_make_args():
+    """
+    Get required arguments for spawning child GNU Make processes.
+    """
+
+    if gnu_make_enabled():
+        return ["--jobserver-fds=%d,%d" % JobServer._job_pipe, "-j"]
+    else:
+        return []
+
+
+def max_jobs():
+    """
+    Get the maximum number of jobs.
+    """
+
+    return JobServer._max_jobs
+
+
+def running_jobs():
+    """
+    Try to estimate the number of currently running jobs.
+    """
+
+    if not gnu_make_enabled():
+        return 0
+
+    return JobServer._running_jobs()
+
+
+def internal_jobs():
+    return JobServer._internal_jobs
+
+
+class JobGuard(object):
+
+    """
+    Context manager representing a jobserver job.
+    """
+
+    def __enter__(self):
+        wait_acquire()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        release()
+        return False
diff --git a/catkin_tools/execution/jobs.py b/catkin_tools/execution/jobs.py
new file mode 100644
index 00000000..7e8d9ea5
--- /dev/null
+++ b/catkin_tools/execution/jobs.py
@@ -0,0 +1,56 @@
+
+from __future__ import print_function
+
+from multiprocessing import cpu_count
+from tempfile import mkstemp
+from termios import FIONREAD
+
+import array
+import errno
+import fcntl
+import os
+import re
+import subprocess
+import time
+
+from catkin_tools.common import log
+from catkin_tools.common import version_tuple
+
+from catkin_tools.terminal_color import ColorMapper
+
+
+mapper = ColorMapper()
+clr = mapper.clr
+
+
+class Job(object):
+
+    """A Job is a series of operations, each of which is considered a "stage" of the job."""
+
+    def __init__(self, jid, deps, env_loader, stages, continue_on_failure=True):
+        """
+        :param jid: Unique job identifier
+        :param deps: Dependencies (in terms of other jids)
+        :param env_loader: Function which loads the environment for this job
+        :param stages: List of stages to be run in order
+        :param continue_on_failure: Whether to run the remaining stages if one fails
+        """
+        self.jid = jid
+        self.deps = deps
+        self.env_loader = env_loader
+        self.stages = stages
+        self.continue_on_failure = continue_on_failure
+
+    def all_deps_completed(self, completed_jobs):
+        """Return True if all dependencies have been completed."""
+        return all([dep_id in completed_jobs for dep_id in self.deps])
+
+    def all_deps_succeeded(self, completed_jobs):
+        """Return True if all dependencies have been completed and succeeded."""
+        return all([completed_jobs.get(dep_id, False) for dep_id in self.deps])
+
+    def any_deps_failed(self, completed_jobs):
+        """Return True if any dependencies which have been completed have failed."""
+        return any([not completed_jobs.get(dep_id, True) for dep_id in self.deps])
+
+    def getenv(self, env):
+        return self.env_loader(env)
diff --git a/catkin_tools/execution/stages.py b/catkin_tools/execution/stages.py
new file mode 100644
index 00000000..57822c69
--- /dev/null
+++ b/catkin_tools/execution/stages.py
@@ -0,0 +1,81 @@
+
+import os
+
+from .io import IOBufferLogger
+from .io import IOBufferProtocol
+
+
+class Stage(object):
+
+    """A description of one of the serially-executed stages of a Job.
+
+    Like Jobs, Stages are stateless, and simply describe what needs to be done
+    and how to do it.
+    """
+
+    def __init__(self, label, logger_factory=IOBufferProtocol.factory, occupy_job=True):
+        self.label = str(label)
+        self.logger_factory = logger_factory
+        self.occupy_job = occupy_job
+
+
+class CommandStage(Stage):
+
+    """Job stage that describes a system command.
+
+    :param label: The label for the stage
+    :param cmd: A list of strings composing a system command
+    :param logger_factory: A factory for the protocol class to use for this stage
+
+    Additional kwargs are passed to `async_execute_process`
+    """
+
+    def __init__(
+            self,
+            label,
+            cmd,
+            cwd=os.getcwd(),
+            env={},
+            shell=False,
+            emulate_tty=True,
+            stderr_to_stdout=False,
+            occupy_job=True,
+            logger_factory=IOBufferProtocol.factory):
+        if not type(cmd) in [list, tuple] or not all([type(s) is str for s in cmd]):
+            raise ValueError('Command stage must be a list of strings: {}'.format(cmd))
+        super(CommandStage, self).__init__(label, logger_factory, occupy_job)
+
+        self.async_execute_process_kwargs = {
+            'cmd': cmd,
+            'cwd': cwd,
+            'env': env,
+            'shell': shell,
+            # Emulate tty for cli colors
+            'emulate_tty': emulate_tty,
+            # Capture stderr and stdout separately
+            'stderr_to_stdout': stderr_to_stdout,
+        }
+
+
+class FunctionStage(Stage):
+
+    """Job stage that describes a python function.
+
+    :param label: The label for the stage
+    :param function: A python function which returns 0 on success
+
+    Functions must take the arguments:
+    - logger
+    - event_queue
+    """
+
+    def __init__(self, label, function, logger_factory=IOBufferLogger.factory, occupy_job=True, *args, **kwargs):
+        if not callable(function):
+            raise ValueError('Function stage must be callable.')
+        super(FunctionStage, self).__init__(label, logger_factory, occupy_job)
+
+        def function_proxy(logger, event_queue):
+            return function(logger, event_queue, *args, **kwargs)
+        self.function = function_proxy
diff --git a/catkin_tools/jobs/__init__.py b/catkin_tools/jobs/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/catkin_tools/jobs/catkin.py b/catkin_tools/jobs/catkin.py
new file mode 100644
index 00000000..3a3a3b2f
--- /dev/null
+++ b/catkin_tools/jobs/catkin.py
@@ -0,0 +1,138 @@
+# Copyright 2014 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
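+"""Job factory for building catkin packages.
+
+create_catkin_build_job below composes FunctionStage and CommandStage
+objects into a single Job per package, roughly like this (illustrative
+sketch only; see the factory for the real argument handling):
+
+    stages = [FunctionStage('mkdir', makedirs, path=build_space),
+              CommandStage('cmake', [CMAKE_EXEC, ...], cwd=build_space),
+              CommandStage('make', [MAKE_EXEC] + make_args, cwd=build_space)]
+    job = Job(jid=package.name, deps=dependencies,
+              env_loader=get_env_loader(package, context), stages=stages)
+"""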
+ +import csv +import os +import threading + + +from catkin_tools.argument_parsing import handle_make_arguments + +from catkin_tools.common import mkdir_p + +from catkin_tools.execution.jobs import Job +from catkin_tools.execution.stages import CommandStage +from catkin_tools.execution.stages import FunctionStage + +from .commands.cmake import CMAKE_EXEC +from .commands.cmake import CMakeIOBufferProtocol +from .commands.make import MAKE_EXEC + +from .job import create_env_file +from .job import get_env_file_path +from .job import get_env_loader +from .job import get_package_build_space_path +from .job import makedirs + + +# job factories + +def create_catkin_build_job(context, package, package_path, dependencies, force_cmake, pre_clean): + """Job class for building catkin packages""" + + # Package source space path + pkg_dir = os.path.join(context.source_space_abs, package_path) + + # Package build space path + build_space = context.package_build_space(package) + # Package devel space path + devel_space = context.package_devel_space(package) + # Package install space path + install_space = context.package_install_space(package) + + # Create job stages + stages = [] + + # Create package build space + stages.append(FunctionStage( + 'mkdir', + makedirs, + path=build_space)) + + # Only use the env prefix for building if the develspace is isolated + env_prefix = [] + if context.isolate_devel: + # Create an environment file + env_file_path = get_env_file_path(package, context) + stages.append(FunctionStage( + 'envgen', + create_env_file, + package=package, + context=context, + env_file_path=env_file_path)) + + env_prefix = [env_file_path] + + # Construct CMake command + makefile_path = os.path.join(build_space, 'Makefile') + if not os.path.isfile(makefile_path) or force_cmake: + stages.append(CommandStage( + 'cmake', + (env_prefix + [ + CMAKE_EXEC, + pkg_dir, + '--no-warn-unused-cli', + '-DCATKIN_DEVEL_PREFIX=' + devel_space, + '-DCMAKE_INSTALL_PREFIX=' + install_space] + + context.cmake_args), + cwd=build_space, + logger_factory=CMakeIOBufferProtocol.factory_factory(pkg_dir), + occupy_job=True + )) + else: + stages.append(CommandStage( + 'check', + env_prefix + [MAKE_EXEC, 'cmake_check_build_system'], + cwd=build_space, + logger_factory=CMakeIOBufferProtocol.factory_factory(pkg_dir), + occupy_job=True + )) + + # Pre-clean command + if pre_clean: + make_args = handle_make_arguments( + context.make_args + context.catkin_make_args) + stages.append(CommandStage( + 'preclean', + env_prefix + [MAKE_EXEC, 'clean'] + make_args, + cwd=build_space, + )) + + # Make command + make_args = handle_make_arguments( + context.make_args + context.catkin_make_args) + stages.append(CommandStage( + 'make', + env_prefix + [MAKE_EXEC] + make_args, + cwd=build_space, + )) + + # Make install command, if installing + if context.install: + stages.append(CommandStage( + 'install', + env_prefix + [MAKE_EXEC, 'install'], + cwd=build_space)) + + return Job( + jid=package.name, + deps=dependencies, + env_loader=get_env_loader(package, context), + stages=stages) + +description = dict( + build_type='catkin', + description="Builds a catkin package.", + create_build_job=create_catkin_build_job +) diff --git a/catkin_tools/jobs/cmake.py b/catkin_tools/jobs/cmake.py new file mode 100644 index 00000000..69c47c2b --- /dev/null +++ b/catkin_tools/jobs/cmake.py @@ -0,0 +1,343 @@ +# Copyright 2014 Open Source Robotics Foundation, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import stat +import subprocess +import sys +import tempfile + + +from catkin_tools.argument_parsing import handle_make_arguments + +from catkin_tools.common import mkdir_p + +from .commands.cmake import CMAKE_EXEC +from .commands.cmake import CMakeIOBufferProtocol +from .commands.make import MAKE_EXEC + +from .job import create_env_file +from .job import get_env_file_path +from .job import get_env_loader +from .job import get_package_build_space_path +from .job import makedirs + +from catkin_tools.execution.jobs import Job +from catkin_tools.execution.stages import CommandStage +from catkin_tools.execution.stages import FunctionStage + +from catkin_tools.terminal_color import ColorMapper + + +mapper = ColorMapper() +clr = mapper.clr + +# FileNotFoundError from Python3 +try: + FileNotFoundError +except NameError: + class FileNotFoundError(OSError): + pass + + +def get_python_install_dir(): + """Returns the same value as the CMake variable PYTHON_INSTALL_DIR + + The PYTHON_INSTALL_DIR variable is normally set from the CMake file: + + catkin/cmake/python.cmake + + :returns: Python install directory for the system Python + :rtype: str + """ + python_install_dir = 'lib' + if os.name != 'nt': + python_version_xdoty = str(sys.version_info[0]) + '.' 
+ str(sys.version_info[1]) + python_install_dir = os.path.join(python_install_dir, 'python' + python_version_xdoty) + + python_use_debian_layout = os.path.exists('/etc/debian_version') + python_packages_dir = 'dist-packages' if python_use_debian_layout else 'site-packages' + python_install_dir = os.path.join(python_install_dir, python_packages_dir) + return python_install_dir + + +def get_multiarch(): + if not sys.platform.lower().startswith('linux'): + return '' + # this function returns the suffix for lib directories on supported systems or an empty string + # it uses two step approach to look for multiarch: first run gcc -print-multiarch and if + # failed try to run dpkg-architecture + error_thrown = False + try: + p = subprocess.Popen( + ['gcc', '-print-multiarch'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + except (OSError, FileNotFoundError): + error_thrown = True + if error_thrown or p.returncode != 0: + try: + out, err = subprocess.Popen( + ['dpkg-architecture', '-qDEB_HOST_MULTIARCH'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() + except (OSError, FileNotFoundError): + return '' + # be sure to return empty string or a valid multiarch tuple + decoded = out.decode().strip() + assert(not decoded or decoded.count('-') == 2) + return decoded + + +SETUP_FILE_TEMPLATE = """\ +#!/usr/bin/env sh +# generated from catkin_tools.jobs.cmake python module + +# remember type of shell if not already set +if [ -z "$CATKIN_SHELL" ]; then + CATKIN_SHELL=sh +fi + +# detect if running on Darwin platform +_UNAME=`uname -s` +IS_DARWIN=0 +if [ "$_UNAME" = "Darwin" ]; then + IS_DARWIN=1 +fi + +# Prepend to the environment +export CMAKE_PREFIX_PATH="{cmake_prefix_path}$CMAKE_PREFIX_PATH" +if [ $IS_DARWIN -eq 0 ]; then + export LD_LIBRARY_PATH="{ld_path}$LD_LIBRARY_PATH" +else + export DYLD_LIBRARY_PATH="{ld_path}$DYLD_LIBRARY_PATH" +fi +export CPATH="{cpath}$CPATH" +export LIBRARY_PATH="{library_path}$LIBRARY_PATH" +export PATH="{path}$PATH" +export PKG_CONFIG_PATH="{pkgcfg_path}$PKG_CONFIG_PATH" +export PYTHONPATH="{pythonpath}$PYTHONPATH" +""" + +ENV_FILE_TEMPLATE = """\ +#!/usr/bin/env sh +# generated from catkin_tools.jobs.cmake + +if [ $# -eq 0 ] ; then + /bin/echo "Usage: env.sh COMMANDS" + /bin/echo "Calling env.sh without arguments is not supported anymore. Instead\ +spawn a subshell and source a setup file manually." + exit 1 +fi + +# ensure to not use different shell type which was set before +CATKIN_SHELL=sh + +# source setup.sh from same directory as this file +_CATKIN_SETUP_DIR=$(cd "`dirname "$0"`" > /dev/null && pwd) +. 
"$_CATKIN_SETUP_DIR/setup.sh" +exec "$@" +""" + + +def generate_env_file(logger, event_queue, context, install_target): + env_file_path = os.path.join(install_target, 'env.sh') + if os.path.exists(env_file_path): + return 0 + + env_file_directory = os.path.dirname(env_file_path) + if not os.path.exists(env_file_directory): + os.makedirs(env_file_directory) + + logger.out(clr("Generating env file: @!@{yf}{}@|").format(env_file_path)) + + # Create a temporary file in the setup_file_directory, so os.rename cannot fail + tmp_dst_handle, tmp_dst_path = tempfile.mkstemp( + dir=env_file_directory, + prefix=os.path.basename(env_file_path) + '.') + + # Write the filled template to the file + subs = {} + data = ENV_FILE_TEMPLATE.format(**subs) + os.write(tmp_dst_handle, data.encode('utf-8')) + os.close(tmp_dst_handle) + + # Do an atomic rename with os.rename + os.rename(tmp_dst_path, env_file_path) + os.chmod(env_file_path, 0o755) + + return 0 + + +def generate_setup_file(logger, event_queue, context, install_target): + + # Create full path to setup file + setup_file_path = os.path.join(install_target, 'setup.sh') + + # Check if the setup file needs to be generated + if context.install: + # Create the setup file in the install space + setup_file_needed = context.isolate_install or not os.path.exists(setup_file_path) + else: + # Do not replace existing setup.sh if devel space is merged + setup_file_needed = context.isolate_devel or not os.path.exists(setup_file_path) + + if not setup_file_needed: + logger.out("Setup file does not need to be generated.") + return 0 + else: + logger.out(clr("Generating setup file: @!@{yf}{}@|").format(setup_file_path)) + + # Create the setup file that dependant packages will source + arch = get_multiarch() + subs = {} + subs['cmake_prefix_path'] = install_target + ":" + subs['ld_path'] = os.path.join(install_target, 'lib') + ":" + pythonpath = os.path.join(install_target, get_python_install_dir()) + subs['pythonpath'] = pythonpath + ':' + subs['pkgcfg_path'] = os.path.join(install_target, 'lib', 'pkgconfig') + ":" + subs['path'] = os.path.join(install_target, 'bin') + ":" + subs['cpath'] = os.path.join(install_target, 'include') + ":" + subs['library_path'] = os.path.join(install_target, 'lib') + ":" + if arch: + subs['ld_path'] += os.path.join(install_target, 'lib', arch) + ":" + subs['pkgcfg_path'] += os.path.join(install_target, 'lib', arch, 'pkgconfig') + ":" + setup_file_directory = os.path.dirname(setup_file_path) + if not os.path.exists(setup_file_directory): + os.makedirs(setup_file_directory) + + # Create a temporary file in the setup_file_directory, so os.rename cannot fail + tmp_dst_handle, tmp_dst_path = tempfile.mkstemp( + dir=setup_file_directory, + prefix=os.path.basename(setup_file_path) + '.') + + # Write the filled template to the file + data = SETUP_FILE_TEMPLATE.format(**subs) + os.write(tmp_dst_handle, data.encode('utf-8')) + os.close(tmp_dst_handle) + + # Do an atomic rename with os.rename + os.rename(tmp_dst_path, setup_file_path) + + return 0 + + +def create_cmake_build_job(context, package, package_path, dependencies, force_cmake, pre_clean): + + # Package source space path + pkg_dir = os.path.join(context.source_space_abs, package_path) + + # Package build space path + build_space = context.package_build_space(package) + # Package devel space path + devel_space = context.package_devel_space(package) + # Package install space path + install_space = context.package_install_space(package) + + # Get the actual install location + install_target = 
install_space if context.install else devel_space + + # Create job stages + stages = [] + + # Create package build space + stages.append(FunctionStage( + 'mkdir', + makedirs, + path=build_space)) + + # Only use the env prefix for building if the devel space is isolated + env_prefix = [] + if context.isolate_devel: + # Create an environment file + env_file_path = get_env_file_path(package, context) + stages.append(FunctionStage( + 'envgen', + create_env_file, + package=package, + context=context, + env_file_path=env_file_path)) + + env_prefix = [env_file_path] + + # CMake command + makefile_path = os.path.join(build_space, 'Makefile') + if not os.path.isfile(makefile_path) or force_cmake: + stages.append(CommandStage( + 'cmake', + (env_prefix + [ + CMAKE_EXEC, + pkg_dir, + '--no-warn-unused-cli', + '-DCMAKE_INSTALL_PREFIX=' + install_target] + + context.cmake_args), + cwd=build_space, + logger_factory=CMakeIOBufferProtocol.factory_factory(pkg_dir) + )) + else: + stages.append(CommandStage( + 'check', + env_prefix + [MAKE_EXEC, 'cmake_check_build_system'], + cwd=build_space, + logger_factory=CMakeIOBufferProtocol.factory_factory(pkg_dir) + )) + + # Pre-clean command + if pre_clean: + make_args = handle_make_arguments( + context.make_args + context.catkin_make_args) + stages.append(CommandStage( + 'preclean', + env_prefix + [MAKE_EXEC, 'clean'] + make_args, + cwd=build_space, + )) + + # Make command + stages.append(CommandStage( + 'make', + env_prefix + [MAKE_EXEC] + handle_make_arguments(context.make_args), + cwd=build_space + )) + + # Make install command (always run on plain cmake) + stages.append(CommandStage( + 'install', + env_prefix + [MAKE_EXEC, 'install'], + cwd=build_space)) + + # Generate the setup file in the install target for dependent packages to source + stages.append(FunctionStage( + 'setupgen', + generate_setup_file, + context=context, + install_target=install_target)) + + stages.append(FunctionStage( + 'envgen', + generate_env_file, + context=context, + install_target=install_target)) + + return Job( + jid=package.name, + deps=dependencies, + env_loader=get_env_loader(package, context), + stages=stages) + + +description = dict( + build_type='cmake', + description="Builds a plain CMake package.", + create_build_job=create_cmake_build_job +)
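The `description` dict above is the plugin hook: the build verb discovers build-type implementations through the `catkin_tools.jobs` entry-point group and calls their `create_build_job` (see the `pkg_resources` loop in build.py later in this patch). A minimal lookup sketch, assuming a hypothetical helper name; `iter_entry_points` is the standard setuptools API also used below:

import pkg_resources

def find_create_build_job(build_type):
    # Scan the 'catkin_tools.jobs' entry-point group; each entry point
    # resolves to a description dict like the one defined above for 'cmake'.
    for entry_point in pkg_resources.iter_entry_points(group='catkin_tools.jobs'):
        if entry_point.name == build_type:
            return entry_point.load()['create_build_job']
    return None

# e.g. find_create_build_job('cmake') would return create_cmake_build_job
# once this package is installed and its entry points are registered.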
diff --git a/catkin_tools/jobs/commands/__init__.py b/catkin_tools/jobs/commands/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/catkin_tools/jobs/commands/cmake.py b/catkin_tools/jobs/commands/cmake.py new file mode 100644 index 00000000..fdd3c50a --- /dev/null +++ b/catkin_tools/jobs/commands/cmake.py @@ -0,0 +1,152 @@ +# Copyright 2014 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os + +from catkin_tools.execution.io import IOBufferProtocol + +from catkin_tools.terminal_color import fmt +from catkin_tools.terminal_color import sanitize + +from catkin_tools.utils import which + +CMAKE_EXEC = which('cmake') + + +def split_to_last_line_break(data): + """This splits a byte buffer into (head, tail) where head contains the + beginning of the buffer to the last line break (inclusive) and the tail + contains all bytes after that.""" + last_break_index = 1 + data.rfind(b'\n') + return data[:last_break_index], data[last_break_index:] + + +class CMakeIOBufferProtocol(IOBufferProtocol): + + """An asyncio protocol that collects stdout and stderr. + + This class also generates `stdout` and `stderr` events. + + Since the underlying asyncio API constructs the actual protocols, this + class provides a factory method to inject the job and stage information + into the created protocol. + """ + + def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, *args, **kwargs): + super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs) + self.source_path = source_path + + # These are buffers for incomplete lines that we want to wait to parse + # until we have received them completely + self.stdout_tail = b'' + self.stderr_tail = b'' + + def on_stdout_received(self, data): + data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data) + colored = self.color_lines(data_head) + super(CMakeIOBufferProtocol, self).on_stdout_received(colored) + + def on_stderr_received(self, data): + data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data) + colored = self.color_lines(data_head) + super(CMakeIOBufferProtocol, self).on_stderr_received(colored) + + def close(self): + # Make sure tail buffers are flushed + self.flush_tails() + super(CMakeIOBufferProtocol, self).close() + + def flush_tails(self): + """Write out any unprocessed tail buffers.""" + + colored = self.color_lines(self.stdout_tail) + super(CMakeIOBufferProtocol, self).on_stdout_received(colored) + self.stdout_tail = b'' + + colored = self.color_lines(self.stderr_tail) + super(CMakeIOBufferProtocol, self).on_stderr_received(colored) + self.stderr_tail = b'' + + def color_lines(self, data): + """Apply colorization rules to each line in data""" + decoded_data = data.decode('utf-8') + # TODO: This will only work if all lines are received at once. Instead + # of directly splitting lines, we should buffer the data lines until + # the last character is a line break + lines = decoded_data.splitlines(True) # Keep line breaks + colored_lines = [self.colorize_cmake(l) for l in lines] + colored_data = ''.join(colored_lines) + encoded_data = colored_data.encode('utf-8') + return encoded_data + + @classmethod + def factory_factory(cls, source_path): + """Factory factory for constructing protocols that know the source path for this CMake package.""" + def factory(label, job_id, stage_label, event_queue, log_path): + # factory is called by catkin_tools executor + def init_proxy(*args, **kwargs): + # init_proxy is called by asyncio + return cls(label, job_id, stage_label, event_queue, log_path, source_path, *args, **kwargs) + return init_proxy + return factory
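For illustration, the tail buffering that feeds color_lines() works like this (hypothetical buffer contents):

# An incomplete trailing line stays in the tail until more data arrives:
head, tail = split_to_last_line_break(b'-- Found Boost\n-- Detect')
assert head == b'-- Found Boost\n'
assert tail == b'-- Detect'
# so color_lines() only ever processes whole, newline-terminated lines.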
+ def colorize_cmake(self, line): + """Colorizes output from CMake + + This also prepends the source path to the locations of warnings and errors. + + :param line: one newline-terminated line from `cmake` which needs coloring. + :type line: str + """ + cline = sanitize(line) + + if len(cline.strip()) == 0: + return cline + + if line.startswith('-- '): + cline = '@{cf}--@| ' + cline[len('-- '):] + if ':' in cline: + split_cline = cline.rstrip().split(':', 1) + cline = cline.replace(split_cline[1], '@{yf}%s@|' % split_cline[1]) + elif line.lower().startswith('warning'): + # WARNING + cline = fmt('@{yf}', reset=False) + cline + elif line.startswith('CMake Warning at '): + # CMake Warning at... + cline = cline.replace('CMake Warning at ', '@{yf}@!CMake Warning@| at ' + self.source_path + os.path.sep) + elif line.startswith('CMake Warning (dev) at '): + # CMake Warning (dev) at... + cline = cline.replace( + 'CMake Warning (dev) at ', '@{yf}@!CMake Warning (dev)@| at ' + self.source_path + os.path.sep) + elif line.startswith('CMake Warning'): + # CMake Warning... + cline = cline.replace('CMake Warning', '@{yf}@!CMake Warning@|') + elif line.startswith('ERROR:'): + # ERROR: + cline = cline.replace('ERROR:', '@!@{rf}ERROR:@|') + elif line.startswith('CMake Error at '): + # CMake Error... + cline = cline.replace('CMake Error at ', '@{rf}@!CMake Error@| at ' + self.source_path + os.path.sep) + elif line.startswith('CMake Error'): + # CMake Error... + cline = cline.replace('CMake Error', '@{rf}@!CMake Error@|') + elif line.startswith('Call Stack (most recent call first):'): + # CMake Call Stack + cline = cline.replace('Call Stack (most recent call first):', + '@{cf}@_Call Stack (most recent call first):@|') + + return fmt(cline, reset=False) diff --git a/catkin_tools/runner/__init__.py b/catkin_tools/jobs/commands/make.py similarity index 81% rename from catkin_tools/runner/__init__.py rename to catkin_tools/jobs/commands/make.py index 2b175b95..fc88c793 100644 --- a/catkin_tools/runner/__init__.py +++ b/catkin_tools/jobs/commands/make.py @@ -12,11 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os +from catkin_tools.utils import which -if os.name == 'nt': - from . import run_windows as run -else: - from . import run_unix as run - -run_command = run.run_command +MAKE_EXEC = which('make') diff --git a/catkin_tools/jobs/job.py b/catkin_tools/jobs/job.py new file mode 100644 index 00000000..8f900514 --- /dev/null +++ b/catkin_tools/jobs/job.py @@ -0,0 +1,247 @@ +# Copyright 2014 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +import stat + + +from catkin_tools.common import mkdir_p +from catkin_tools.common import get_cached_recursive_build_depends_in_workspace + +from catkin_tools.resultspace import get_resultspace_environment + +from catkin_tools.execution.jobs import Job +from catkin_tools.execution.stages import CommandStage + +from .commands.cmake import CMAKE_EXEC + +# Build Environment File +# ====================== +# +# The Build Environment file is used to create environments for packages built +# in an isolated build scenario. This enables packages to build against other +# packages without sourcing the main workspace setup.sh file. +# +# Due to portability issues, it uses only POSIX-compliant shell features. This +# means that there is no support for BASH-like arrays, and special care needs +# to be taken in order to preserve argument atomicity when passing along to the +# `exec` instruction at the end. +# +# This involves forming a string called `_ARGS` which is composed of tokens +# like `"$_Ai"` for i=0..N-1 for N arguments so that with N=3 arguments, for +# example, `_ARGS` would look like `"$_A0" "$_A1" "$_A2"`. The double-quotes +# are necessary because they define the argument boundaries when the variables +# are expanded by calling `eval`. +
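As a concrete illustration of that argument forwarding (hypothetical invocation, not part of the patch):

# Hypothetical invocation of a generated build_env.sh:
#   $ ./build_env.sh cc '-DFLAGS=-O2 -g' main.c
# The loop in the template below assigns _A0='cc', _A1='-DFLAGS=-O2 -g',
# _A2='main.c', builds _ARGS='"$_A0" "$_A1" "$_A2"', and the final
# `eval exec $_ARGS` re-expands each placeholder as a single word, so the
# embedded space in '-DFLAGS=-O2 -g' survives intact.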
+ENV_FILE_NAME = 'build_env.sh' + + +ENV_FILE_TEMPLATE = """\ +#!/usr/bin/env sh +# generated from within catkin_tools/jobs/job.py + +if [ $# -eq 0 ] ; then + /bin/echo "Usage: build_env.sh COMMANDS" + /bin/echo "Calling build_env.sh without arguments is not supported anymore." + /bin/echo "Instead spawn a subshell and source a setup file manually." + exit 1 +fi + +# save original args for later +_ARGS= +_ARGI=0 +for arg in "$@"; do + # Define placeholder variable + eval "_A$_ARGI=\$arg" + # Add placeholder variable to arg list + _ARGS="$_ARGS \\"\$_A$_ARGI\\"" + # Increment arg index + _ARGI=`expr $_ARGI + 1` + + ####################### + ## Uncomment for debug: + #_escaped="$(echo "$arg" | sed -e 's@ @ @g')" + #echo "$_escaped" + #eval "echo '$_ARGI \$_A$_ARGI'" + ####################### +done + +####################### +## Uncomment for debug: +#echo "exec args:" +#echo "$_ARGS" +#for arg in $_ARGS; do eval echo $arg; done +#echo "-----------" +##################### + +# remove all passed in args, resetting $@, $*, $#, $n +shift $# +# set the args for the sourced scripts +set -- $@ "--extend" +# source setup.sh with implicit --extend argument for each direct build depend in the workspace +{sources} + +# execute given args +eval exec $_ARGS +""" + + +def get_env_file_path(package, context): + """Get the path to a package's build environment file.""" + + return os.path.abspath(os.path.join(context.build_space_abs, package.name, ENV_FILE_NAME)) + + +def get_env_loaders(package, context): + """Get a list of env loaders required to build this package.""" + + sources = [] + # If installing to an isolated install space, or not installing and using an isolated devel space + if (context.install and context.isolate_install) or (not context.install and context.isolate_devel): + # Source each package's install or devel space + space = context.install_space_abs if context.install else context.devel_space_abs + # Get the recursive dependencies + depends = get_cached_recursive_build_depends_in_workspace(package, context.packages) + # For each dep add a line to source its setup file + for dep_pth, dep in depends: + source_path = os.path.join(space, dep.name, 'env.sh') + sources.append(source_path) + else: + # Just source common install or devel space + source_path = os.path.join( + context.install_space_abs if context.install else context.devel_space_abs, + 'env.sh') + sources = [source_path] # if os.path.exists(source_path) else [] + + return sources + + +def get_env_loader(package, context): + """This function returns a function object which extends a base environment + based on a set of environments to load.""" + + def load_env(base_env): + job_env = dict(base_env) + env_loader_paths = get_env_loaders(package, context)
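# e.g. (hypothetical isolated-devel workspace at /ws with deps foo and bar):
# env_loader_paths == ['/ws/devel/foo/env.sh', '/ws/devel/bar/env.sh'];
# each corresponding resultspace environment is merged into job_env below.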
+ for env_loader_path in env_loader_paths: + resultspace_env = get_resultspace_environment( + os.path.split(env_loader_path)[0], + quiet=True, + cached=True, + strict=False) + job_env = dict(job_env, **resultspace_env) + return job_env + + return load_env + + +def create_env_file(logger, event_queue, package, context, env_file_path): + """FunctionStage functor for creating a build environment file.""" + + source_paths = get_env_loaders(package, context) + + source_snippet = '. "{source_path}"' + sources = [source_snippet.format(source_path=source_path) for source_path in source_paths] + + # Populate the build env file template and write it out + env_file = ENV_FILE_TEMPLATE.format(sources='\n'.join(sources)) + if os.path.exists(env_file_path): + with open(env_file_path, 'r') as f: + if env_file == f.read(): + return 0 + with open(env_file_path, 'w') as f: + f.write(env_file) + + # Make the env file executable + os.chmod(env_file_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR) + + return 0 + + +def get_package_build_space_path(buildspace, package_name): + """Generates a build space path, does not modify the filesystem. + + TODO: Move to common.py + TODO: Get buildspace from context + TODO: Make arguments the same order as get_env_file_path + + :param buildspace: folder in which packages are built + :type buildspace: str + :param package_name: name of the package this build space is for + :type package_name: str + :returns: package specific build directory + :rtype: str + """ + return os.path.join(buildspace, package_name) + + +def create_build_space(logger, event_queue, buildspace, package_name): + """Creates a package build space in the workspace build space, if it does not already exist. + + :param buildspace: folder in which packages are built + :type buildspace: str + :param package_name: name of the package this build space is for + :type package_name: str + :returns: package specific build directory + :rtype: str + """ + package_build_dir = get_package_build_space_path(buildspace, package_name) + if not os.path.exists(package_build_dir): + os.makedirs(package_build_dir) + return package_build_dir + + +def makedirs(logger, event_queue, path): + """FunctionStage functor that makes a path of directories.""" + mkdir_p(path) + return 0 + + +def get_build_type(package): + """Returns the build type for a given package. + + :param package: package object + :type package: :py:class:`catkin_pkg.package.Package` + :returns: build type of the package, e.g. 
'catkin' or 'cmake' + :rtype: str + """ + export_tags = [e.tagname for e in package.exports] + if 'build_type' in export_tags: + build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0] + else: + build_type_tag = 'catkin' + return build_type_tag + + +def create_clean_buildspace_job(context, package_name, dependencies): + + build_space = get_package_build_space_path(context.build_space_abs, package_name) + if not os.path.exists(build_space): + # No-op + return Job(jid=package_name, deps=dependencies, stages=[]) + + stages = [] + + stages.append(CommandStage( + 'rmbuild', + [CMAKE_EXEC, '-E', 'remove_directory', build_space], + cwd=context.build_space_abs)) + + return Job( + jid=package_name, + deps=dependencies, + stages=stages) diff --git a/catkin_tools/verbs/catkin_build/output.py b/catkin_tools/jobs/output.py similarity index 100% rename from catkin_tools/verbs/catkin_build/output.py rename to catkin_tools/jobs/output.py diff --git a/catkin_tools/make_jobserver.py b/catkin_tools/make_jobserver.py deleted file mode 100644 index 3ab82772..00000000 --- a/catkin_tools/make_jobserver.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright 2014 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -from multiprocessing import cpu_count -from tempfile import mkstemp -from termios import FIONREAD - -import array -import errno -import fcntl -import os -import re -import subprocess -import time - -from catkin_tools.common import log -from catkin_tools.common import version_tuple - -JOBSERVER_SUPPORT_MAKEFILE = b""" -all: -\techo $(MAKEFLAGS) | grep -- '--jobserver-fds' -""" - - -def memory_usage(): - """ - Get used and total memory usage. - - :returns: Used and total memory in bytes - :rtype: tuple - """ - - # Handle optional psutil support - try: - import psutil - - psutil_version = version_tuple(psutil.__version__) - if psutil_version < (0, 6, 0): - usage = psutil.phymem_usage() - used = usage.used - else: - usage = psutil.virtual_memory() - used = usage.total - usage.available - - return used, usage.total - - except ImportError: - pass - - return None, None - - -class _MakeJobServer: - """ - This class implements a GNU make job server. 
- """ - - # Singleton jobserver - _singleton = None - - def __init__(self, num_jobs=None, max_load=None, max_mem=None): - """ - :param num_jobs: the maximum number of jobs available - :param max_load: do not dispatch additional jobs if this system load - value is exceeded - :param max_mem: do not dispatch additional jobs if system physical - memory usage exceeds this value (see _set_max_mem for additional - documentation) - """ - - assert(_MakeJobServer._singleton is None) - - if not num_jobs: - try: - num_jobs = cpu_count() - except NotImplementedError: - log('@{yf}WARNING: Failed to determine the cpu_count, falling back to 1 jobs as the default.@|') - num_jobs = 1 - else: - num_jobs = int(num_jobs) - - self.num_jobs = num_jobs - self.max_load = max_load - self._set_max_mem(max_mem) - - self.job_pipe = os.pipe() - - # Initialize the pipe with num_jobs tokens - for i in range(num_jobs): - os.write(self.job_pipe[1], b'+') - - def _set_max_mem(self, max_mem): - """ - Set the maximum memory to keep instantiating jobs. - - :param max_mem: String describing the maximum memory that can be used - on the system. It can either describe memory percentage or absolute - amount. Use 'P%' for percentage or 'N' for absolute value in bytes, - 'Nk' for kilobytes, 'Nm' for megabytes, and 'Ng' for gigabytes. - :type max_mem: str - """ - - if max_mem is None: - self.max_mem = None - return - elif type(max_mem) is float or type(max_mem) is int: - mem_percent = max_mem - elif type(max_mem) is str: - m_percent = re.search('([0-9]+)\%', max_mem) - m_abs = re.search('([0-9]+)([kKmMgG]{0,1})', max_mem) - - if m_percent is None and m_abs is None: - self.max_mem = None - return - - if m_percent: - mem_percent = m_abs.group(1) - elif m_abs: - val = float(m_abs.group(1)) - mag_symbol = m_abs.group(2) - - _, total_mem = memory_usage() - - if mag_symbol == '': - mag = 1.0 - elif mag_symbol.lower() == 'k': - mag = 1024.0 - elif mag_symbol.lower() == 'm': - mag = pow(1024.0, 2) - elif mag_symbol.lower() == 'g': - mag = pow(1024.0, 3) - - mem_percent = 100.0 * val * mag / total_mem - - self.max_mem = max(0.0, min(100.0, float(mem_percent))) - - def _obtain(self): - """ - Obtain a job server token. Be sure to call _release() to avoid - deadlocks. - """ - - while True: - # make sure we're observing load maximums - if self.max_load is not None: - try: - load = os.getloadavg() - if jobserver_running_jobs() > 0 and load[1] > self.max_load: - time.sleep(0.01) - continue - except NotImplementedError: - pass - - # make sure we're observing memory maximum - if self.max_mem is not None: - mem_used, mem_total = memory_usage() - mem_percent_used = 100.0 * float(mem_used) / float(mem_total) - if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem: - time.sleep(0.01) - continue - - # get a token from the job pipe - try: - token = os.read(self.job_pipe[0], 1) - return token - except OSError as e: - if e.errno != errno.EINTR: - raise - - def _release(self): - """ - Release a job server token. - """ - - os.write(self.job_pipe[1], b'+') - - -class _MakeJob: - """ - Context manager representing a jobserver job. - """ - - def __enter__(self): - if _MakeJobServer._singleton is not None: - _MakeJobServer._singleton._obtain() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if _MakeJobServer._singleton is not None: - _MakeJobServer._singleton._release() - return False - - -def _test_support(): - """ - Test if the system 'make' supports the job server implementation. 
- """ - - fd, makefile = mkstemp() - os.write(fd, JOBSERVER_SUPPORT_MAKEFILE) - os.close(fd) - - ret = subprocess.call(['make', '-f', makefile, '-j2'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - os.unlink(makefile) - return (ret == 0) - - -def initialize_jobserver(*args, **kwargs): - """ - Initialize the global GNU Make jobserver. - - :param num_jobs: the maximum number of jobs available - :param max_load: do not dispatch additional jobs if this system load - value is exceeded - :param max_mem: do not dispatch additional jobs if system physical - memory usage exceeds this value - """ - - assert(_MakeJobServer._singleton is None) - - # Check if the jobserver is supported - supported = _test_support() - - if not supported: - _MakeJobServer._singleton = None - log('@{yf}WARNING: Make job server not supported. The number of Make ' - 'jobs may exceed the number of CPU cores.@|') - return - - # Create the jobserver singleton - _MakeJobServer._singleton = _MakeJobServer(*args, **kwargs) - - -def jobserver_job(): - """ - Get a job from the jobserver. - - This is meant to be used with a context manager. - """ - return _MakeJob() - - -def jobserver_arguments(): - """ - Get required arguments for spawning child make processes. - """ - - if _MakeJobServer._singleton is not None: - return ["--jobserver-fds=%d,%d" % _MakeJobServer._singleton.job_pipe, "-j"] - else: - return [] - - -def jobserver_running_jobs(): - """ - Try to estimate the number of currently running jobs. - """ - - if _MakeJobServer._singleton is None: - return '?' - - try: - buf = array.array('i', [0]) - if fcntl.ioctl(_MakeJobServer._singleton.job_pipe[0], FIONREAD, buf) == 0: - return _MakeJobServer._singleton.num_jobs - buf[0] - except NotImplementedError: - pass - except OSError: - pass - - return _MakeJobServer._singleton.num_jobs - - -def jobserver_max_jobs(): - """ - Get the maximum number of jobs. - """ - - if _MakeJobServer._singleton is not None: - return _MakeJobServer._singleton.num_jobs - else: - return 0 - - -def jobserver_supported(): - """ - Returns true if the jobserver exists. - """ - return _MakeJobServer._singleton is not None - - -def set_jobserver_max_mem(max_mem): - """ - Set the maximum memory to keep instantiating jobs. - - :param max_mem: String describing the maximum memory that can be used on - the system. It can either describe memory percentage or absolute amount. - Use 'P%' for percentage or 'N' for absolute value in bytes, 'Nk' for - kilobytes, 'Nm' for megabytes, and 'Ng' for gigabytes. - :type max_mem: str - """ - - if _MakeJobServer._singleton: - _MakeJobServer._singleton._set_max_mem(max_mem) diff --git a/catkin_tools/resultspace.py b/catkin_tools/resultspace.py index 3a64f20e..c57ddcf2 100644 --- a/catkin_tools/resultspace.py +++ b/catkin_tools/resultspace.py @@ -12,10 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+try: + from md5 import md5 +except ImportError: + from hashlib import md5 import os import re -from .runner import run_command +from osrf_pycommon.process_utils import execute_process + from .common import string_type from .utils import which @@ -25,12 +30,13 @@ # Cache for result-space environments _resultspace_env_cache = {} +_resultspace_env_hooks_cache = {} -def get_resultspace_environment(result_space_path, quiet=False, cached=True): +def get_resultspace_environment(result_space_path, quiet=False, cached=True, strict=True): """Get the environment variables which result from sourcing another catkin workspace's setup files as the string output of `cmake -E environment`. - This command is used to be as portable as possible. + This cmake command is used in order to be as portable as possible. :param result_space_path: path to a Catkin result-space whose environment should be loaded, ``str`` :type result_space_path: str @@ -38,12 +44,26 @@ def get_resultspace_environment(result_space_path, quiet=False, cached=True): :type quiet: bool :param cached: use the cached environment :type cached: bool + :param strict: require the ``.catkin`` file exists in the resultspace + :type strict: bool :returns: a dictionary of environment variables and their values """ + # Get the MD5 checksums for the current env hooks + # TODO: the env hooks path should be defined somewhere + env_hooks_path = os.path.join(result_space_path, 'etc', 'catkin', 'profile.d') + if os.path.exists(env_hooks_path): + env_hooks = [ + md5(open(os.path.join(env_hooks_path, path)).read()).hexdigest() + for path in os.listdir(env_hooks_path)] + else: + env_hooks = [] + # Check the cache first - if cached and result_space_path in _resultspace_env_cache: + if (cached + and result_space_path in _resultspace_env_cache + and env_hooks == _resultspace_env_hooks_cache.get(result_space_path, [])): return _resultspace_env_cache[result_space_path] # Check to make sure result_space_path is a valid directory @@ -57,7 +77,7 @@ def get_resultspace_environment(result_space_path, quiet=False, cached=True): # Check to make sure result_space_path contains a `.catkin` file # TODO: `.catkin` should be defined somewhere as an atom in catkin_pkg - if not os.path.exists(os.path.join(result_space_path, '.catkin')): + if strict and not os.path.exists(os.path.join(result_space_path, '.catkin')): if quiet: return dict() raise IOError( @@ -115,7 +135,7 @@ def get_resultspace_environment(result_space_path, quiet=False, cached=True): env_dict = {} try: - for line in run_command(command, cwd=os.getcwd()): + for line in execute_process(command, cwd=os.getcwd()): if isinstance(line, string_type): matches = env_regex.findall(line) for (key, value) in matches: @@ -127,6 +147,7 @@ def get_resultspace_environment(result_space_path, quiet=False, cached=True): return {} _resultspace_env_cache[result_space_path] = env_dict + _resultspace_env_hooks_cache[result_space_path] = env_hooks return env_dict diff --git a/catkin_tools/runner/run_unix.py b/catkin_tools/runner/run_unix.py deleted file mode 100644 index ef8abc80..00000000 --- a/catkin_tools/runner/run_unix.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2014 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import os -import pty -import select -import sys -import time - -from subprocess import Popen -from subprocess import STDOUT - - -def process_incomming_lines(lines, left_over): - if not lines: - return None, left_over - if lines[-1].endswith(b'\n'): - data = b''.join(lines) - left_over = b'' - else: - data = b''.join(lines[:-1]) - left_over = lines[-1] - return data, left_over - - -def run_command(cmd, cwd=None): - master, slave = pty.openpty() - - p = None - while p is None: - try: - p = Popen(cmd, stdin=slave, stdout=slave, stderr=STDOUT, cwd=cwd, universal_newlines=True) - except OSError as exc: - if 'Text file busy' in str(exc): - # This is a transient error, try again shortly - time.sleep(0.01) - continue - raise - if sys.platform.startswith('darwin'): - os.close(slave) # This causes the below select to exit when the subprocess closes - - left_over = b'' - - # Read data until the process is finished - while p.poll() is None: - incomming = left_over - rlist, wlist, xlist = select.select([master], [], [], 0.01) - if rlist: - incomming += os.read(master, 1024) - lines = incomming.splitlines(True) # keepends=True - data, left_over = process_incomming_lines(lines, left_over) - if data is None: - continue - try: - yield data.decode() - except UnicodeDecodeError as exc: - yield unicode(data, errors='ignore') - - # Done - os.close(master) - yield int(p.returncode) diff --git a/catkin_tools/runner/run_windows.py b/catkin_tools/runner/run_windows.py deleted file mode 100644 index 11e654b6..00000000 --- a/catkin_tools/runner/run_windows.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2014 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import os -import select - -from subprocess import PIPE -from subprocess import Popen -from subprocess import STDOUT - - -def run_command(cmd, cwd=None): - p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, cwd=cwd) - - left_over = b'' - - while p.poll() is None: - incomming = left_over - rlist, wlist, xlist = select.select([p.stdout], [], []) - if rlist: - incomming += os.read(p.stdout.fileno(), 1024) - lines = incomming.splitlines(True) # keepends=True - if not lines: - continue - if lines[-1].endswith('\n'): - data = b''.join(lines) - left_over = b'' - else: - data = b''.join(lines[-1]) - left_over = lines[-1] - try: - yield data.decode() - except UnicodeDecodeError: - yield unicode(data, errors='ignore') - # Done - yield p.returncode diff --git a/catkin_tools/verbs/catkin_build/.build.py.swn b/catkin_tools/verbs/catkin_build/.build.py.swn new file mode 100644 index 0000000000000000000000000000000000000000..bde855773a47cd780eac3883f3c29513c0792cf6 GIT binary patch literal 40960 [40960 bytes of base85-encoded binary patch data omitted: .build.py.swn is a Vim swap file committed by accident] diff --git a/catkin_tools/verbs/catkin_build/build.py b/catkin_tools/verbs/catkin_build/build.py --- a/catkin_tools/verbs/catkin_build/build.py +++ b/catkin_tools/verbs/catkin_build/build.py + if len(unbuilt_pkgs) >
Please run `catkin list " + "--deps` to determine the problematic package(s).")) + return + # Check the number of packages to be built if len(packages_to_be_built) == 0: log(clr('[build] No packages to be built.')) return - max_package_name_length = max([len(pkg.name) for pth, pkg in packages_to_be_built]) if packages_to_be_built else 0 # Assert start_with package is in the workspace - verify_start_with_option(start_with, packages, all_packages, packages_to_be_built + packages_to_be_built_deps) - - # Setup pool of executors - executors = {} - # The communication queue can have ExecutorEvent's or str's passed into it from the executors - comm_queue = Queue() - # The job queue has Jobs put into it - job_queue = Queue() - # Lock for install space - install_lock = Lock() if lock_install else FakeLock() - # Determine the number of executors - try: - if jobs: - jobs = int(jobs) - if jobs < 1: - sys.exit("Specified number of jobs '{0}' is not positive.".format(jobs)) - except ValueError: - sys.exit("Specified number of jobs '{0}' is no integer.".format(jobs)) + verify_start_with_option( + start_with, + packages, + all_packages, + packages_to_be_built + packages_to_be_built_deps) + + # Remove packages before start_with + if start_with is not None: + for path, pkg in list(packages_to_be_built): + if pkg.name != start_with: + wide_log("[build] Skipping package '{0}'".format(pkg.name)) + packages_to_be_built.pop(0) + else: + break + + # Construct jobs + jobs = [] + packages_to_be_built_names = [p.name for _, p in packages_to_be_built] + for pkg_path, pkg in all_packages: + if pkg.name == 'catkin': + continue + if pkg.name not in packages_to_be_built_names: + continue + # Ignore metapackages + if 'metapackage' in [e.tagname for e in pkg.exports]: + continue + + # Get actual execution deps + deps = [p.name for _, p in get_cached_recursive_build_depends_in_workspace( + pkg, packages_to_be_built) if p.name != 'catkin'] + + # Create the job based on the build type + build_type = get_build_type(pkg) + build_job_kwargs = dict( + context=context, + package=pkg, + package_path=pkg_path, + dependencies=deps, + force_cmake=force_cmake, + pre_clean=pre_clean) + build_job = None + + for entry_point in pkg_resources.iter_entry_points(group='catkin_tools.jobs'): + if build_type == entry_point.name: + loaded_ep = entry_point.load() + build_job = loaded_ep['create_build_job'](**build_job_kwargs) + + if build_job is not None: + jobs.append(build_job) + else: + wide_log(clr("[build] @!@{yf}Warning:@| Skipping package '{}'" + " because it has an unknown package build type: \"{}\"").format( + pkg.name, build_type)) + + # Queue for communicating status + event_queue = Queue() + try: - jobs = cpu_count() if jobs is None else jobs - except NotImplementedError: - log('Failed to determine the cpu_count, falling back to 1 jobs as the default.') - jobs = 1 if jobs is None else jobs - # If only one set of jobs, turn on interleaving to get more responsive feedback - if jobs == 1: - # TODO: make the system more intelligent so that it can automatically switch to streaming output - # when only one job is building, even if multiple jobs could be building - quiet = False - interleave_output = True - # Start the executors - for x in range(jobs): - e = Executor(x, context, comm_queue, job_queue, install_lock, continue_on_failure) - executors[x] = e - e.start() - - try: # Finally close out now running executors - # Variables for tracking running jobs and built/building packages - start = time.time() - total_packages = 
len(packages_to_be_built) - package_count = 0 - running_jobs = {} - last_status_update_time = time.time() - limit_status_period = (1.0 / limit_status_rate) if limit_status_rate else 0 - log_dir = os.path.join(context.build_space_abs, 'build_logs') - color = True - if not force_color and not is_tty(sys.stdout): - color = False - out = OutputController(log_dir, quiet, interleave_output, - color, max_package_name_length, prefix_output=(jobs > 1)) - if no_status: - disable_wide_log() - - # Prime the job_queue - ready_packages = [] - failed_packages = [] - if start_with is None: - ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages) - while start_with is not None: - ready_packages.extend(get_ready_packages(packages_to_be_built, running_jobs, completed_packages)) - while ready_packages: - pth, pkg = ready_packages.pop(0) - if pkg.name != start_with: - completed_packages.append(pkg.name) - package_count += 1 - wide_log("[build] Skipping package '{0}'".format(pkg.name)) - else: - ready_packages.insert(0, (pth, pkg)) - start_with = None - break - running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake) - assert running_jobs - - error_state = False - errors = [] - - def set_error_state(error_state): - if error_state: - return - # Set the error state to prevent new jobs - error_state = True - # Empty the job queue - while not job_queue.empty(): - job_queue.get() - # Kill the executors by sending a None to the job queue for each of them - for x in range(len(executors)): - job_queue.put(None) - if x in executors: - executors[x].should_shutdown = True - - # While any executors are running, process executor events - while executors: - try: - # Try to get an event from the communications queue - try: - event = comm_queue.get(True, 0.1) - except Empty: - # timeout occured, create null event to pass through checks - event = ExecutorEvent(None, None, None, None) - - if event.event_type == 'job_started': - package_count += 1 - running_jobs[event.package]['package_number'] = package_count - running_jobs[event.package]['start_time'] = time.time() - out.job_started(event.package) - - if event.event_type == 'command_started': - out.command_started(event.package, event.data['cmd'], event.data['location']) - - if event.event_type == 'command_log': - out.command_log(event.package, event.data['message']) - - if event.event_type == 'command_failed': - out.command_failed(event.package, event.data['cmd'], event.data['location'], event.data['retcode']) - # Add to list of errors - errors.append(event) - - if event.event_type == 'command_finished': - out.command_finished(event.package, event.data['cmd'], - event.data['location'], event.data['retcode']) - - if event.event_type == 'job_finished': - completed_packages.append(event.package) - run_time = format_time_delta(time.time() - running_jobs[event.package]['start_time']) - out.job_finished(event.package, run_time) - del running_jobs[event.package] - # If shutting down, do not add new packages - if error_state: - continue - # Calculate new packages - if not no_status: - wide_log('[build] Calculating new jobs...', end='\r') - sys.stdout.flush() - ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages, - failed_packages) - running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake) - # Make sure there are jobs to be/being processed, otherwise kill the executors - if not running_jobs: - # Kill the executors by sending a 
None to the job queue for each of them - for x in range(jobs): - job_queue.put(None) - - if event.event_type == 'job_failed': - failed_packages.append(event.package) - run_time = format_time_delta(time.time() - running_jobs[event.package]['start_time']) - out.job_failed(event.package, run_time) - del running_jobs[event.package] - # if the continue_on_failure option was not given, stop the executors - if not continue_on_failure: - set_error_state(error_state) - # If shutting down, do not add new packages - if error_state: - continue - # Calculate new packages - if not no_status: - wide_log('[build] Calculating new jobs...', end='\r') - sys.stdout.flush() - ready_packages = get_ready_packages(packages_to_be_built, running_jobs, completed_packages, - failed_packages) - running_jobs = queue_ready_packages(ready_packages, running_jobs, job_queue, context, force_cmake) - # Make sure there are jobs to be/being processed, otherwise kill the executors - if not running_jobs: - # Kill the executors by sending a None to the job queue for each of them - for x in range(jobs): - job_queue.put(None) - - # If an executor exit event, join it and remove it from the executors list - if event.event_type == 'exit': - # If an executor has an exception, set the error state - if event.data['reason'] == 'exception': - set_error_state(error_state) - errors.append(event) - # Join and remove it - executors[event.executor_id].join() - del executors[event.executor_id] - - if not no_status: - # Update the status bar on the screen - executing_jobs = [] - for name, value in running_jobs.items(): - number, start_time = value['package_number'], value['start_time'] - if number is None or start_time is None: - continue - executing_jobs.append({ - 'number': number, - 'name': name, - 'run_time': format_time_delta_short(time.time() - start_time) - }) - msg = clr("[build - {run_time}] ").format(run_time=format_time_delta_short(time.time() - start)) - # If errors post those - if errors: - for error in errors: - msg += clr("[!{package}] ").format(package=error.package) - # Print them in order of started number - for job_msg_args in sorted(executing_jobs, key=lambda args: args['number']): - msg += clr("[{name} - {run_time}] ").format(**job_msg_args) - - if jobserver_supported(): - msg_rhs = clr("[{0}/{1} Jobs | {2}/{3} Active | {4}/{5} Completed]").format( - jobserver_running_jobs(), - jobserver_max_jobs(), - len(executing_jobs), - len(executors), - len(packages) if no_deps else len(completed_packages), - total_packages - ) - else: - msg_rhs = clr("[{0}/{1} Active | {2}/{3} Completed]").format( - len(executing_jobs), - len(executors), - len(packages) if no_deps else len(completed_packages), - total_packages - ) - - # Update title bar - sys.stdout.write("\x1b]2;[build] {0}/{1}\x07".format( - len(packages) if no_deps else len(completed_packages), - total_packages - )) - # Update status bar - # If the status_rate is zero, always do the status update - do_status_update = (limit_status_rate == 0) - # Otherwise calculate the time delta - if not do_status_update: - if (time.time() - last_status_update_time) >= limit_status_period: - last_status_update_time = time.time() - do_status_update = True - # Conditionally do the status update - if do_status_update: - wide_log(msg, rhs=msg_rhs, end='\r') - sys.stdout.flush() - except (KeyboardInterrupt, EOFError): - wide_log("[build] User interrupted, stopping.") - set_error_state(error_state) - # All executors have shutdown - sys.stdout.write("\x1b]2;\x07") - if not errors: + # Spin up status output 
thread + status_thread = ConsoleStatusController( + 'build', + ['package', 'packages'], + jobs, + event_queue, + show_notifications=not no_notify, + show_active_status=not no_status, + show_buffered_stdout=not quiet, + show_stage_events=not quiet, + show_full_summary=(summarize_build is True), + pre_start_time=pre_start_time, + active_status_rate=limit_status_rate) + status_thread.start() + + # Block while running N jobs asynchronously + try: + all_succeeded = run_until_complete(execute_jobs( + 'build', + jobs, + event_queue, + os.path.join(context.build_space_abs, '_logs'), + max_toplevel_jobs=n_jobs, + continue_on_failure=continue_on_failure, + continue_without_deps=False)) + except Exception: + status_thread.keep_running = False + all_succeeded = False + status_thread.join(1.0) + wide_log(str(traceback.format_exc())) + + status_thread.join(1.0) + + # Warn user about new packages + if len(unbuilt_pkgs) > 0: + log(clr("[build] @/@!Note:@| @/Workspace packages have changed, " + "please re-source setup files to use them.@|")) + + if all_succeeded: + # Create isolated devel setup if necessary if context.isolate_devel: if not context.install: _create_unmerged_devel_setup(context) else: _create_unmerged_devel_setup_for_install(context) - if summarize_build: - print_build_summary(context, packages_to_be_built, completed_packages, failed_packages) - wide_log("[build] Finished.") - if not no_notify: - notify("Build Finished", "{0} packages built".format(total_packages)) return 0 - # Else, handle errors - print_error_summary(errors, no_notify, log_dir) - wide_log("") - if summarize_build is True or summarize_build is not False and continue_on_failure is True: - # Always print summary if summarize_build is True - # Conditionally add summary on errors if summarize_build is not explicitly False and - # continue_on_failure is True. - print_build_summary(context, packages_to_be_built, completed_packages, failed_packages) - sys.exit(1) - finally: - # Ensure executors go down - for x in range(jobs): - job_queue.put(None) + else: + return 1 + + except KeyboardInterrupt: + wide_log("[build] Interrupted by user!") + event_queue.put(None) + + +def _create_unmerged_devel_setup(context): + # Find all of the leaf packages in the workspace + # where leaf means that nothing in the workspace depends on it + + # Find all packages in the source space + # Suppress warnings since this is an internal function whose goal is not to + # give feedback on the user's packages + workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[]) + + ordered_packages = topological_order_packages(workspace_packages) + workspace_packages = dict([(p.name, p) for pth, p in workspace_packages.items()]) + dependencies = set([]) + for name, pkg in workspace_packages.items(): + dependencies.update([d.name for d in pkg.buildtool_depends + pkg.build_depends + pkg.run_depends]) + leaf_packages = [] + for name, pkg in workspace_packages.items(): + if pkg.name not in dependencies: + leaf_packages.append(pkg.name) + assert leaf_packages, leaf_packages # Defensive, there should always be at least one leaf + leaf_sources = [] + for pkg_name in leaf_packages: + source_path = os.path.join(context.devel_space_abs, pkg_name, 'setup.sh') + if os.path.isfile(source_path): + leaf_sources.append('. 
{0}'.format(source_path)) + # In addition to the leaf packages, we need to source the recursive run depends of the leaf packages + run_depends = get_recursive_run_depends_in_workspace( + [workspace_packages[p] for p in leaf_packages], ordered_packages) + run_depends_sources = [] + for run_dep_name in [p.name for pth, p in run_depends]: + source_path = os.path.join(context.devel_space_abs, run_dep_name, 'setup.sh') + if os.path.isfile(source_path): + run_depends_sources.append('. {0}'.format(source_path)) + # Create the setup.sh file + setup_sh_path = os.path.join(context.devel_space_abs, 'setup.sh') + env_file = """\ +#!/usr/bin/env sh +# generated from within catkin_tools/verbs/catkin_build/build.py + +# This file aggregates the many setup.sh files in the various +# unmerged devel spaces in this folder. +# This is accomplished by sourcing each leaf package and all the +# recursive run dependencies of those leaf packages + +# Source the first package's setup.sh without the --extend option +{first_source} + +# remove all passed in args, resetting $@, $*, $#, $n +shift $# +# set the --extend arg for the rest of the packages' setup.sh files +set -- $@ "--extend" +# source setup.sh for each of the leaf packages in the workspace +{leaf_sources} + +# And now the setup.sh for each of their recursive run dependencies +{run_depends_sources} +""".format( + first_source=leaf_sources[0], + leaf_sources='\n'.join(leaf_sources[1:]), + run_depends_sources='\n'.join(run_depends_sources) + ) + with open(setup_sh_path, 'w') as f: + f.write(env_file) + # Make this file executable + os.chmod(setup_sh_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR) + # Create the setup.bash file + setup_bash_path = os.path.join(context.devel_space_abs, 'setup.bash') + with open(setup_bash_path, 'w') as f: + f.write("""\ +#!/usr/bin/env bash +# generated from within catkin_tools/verbs/catkin_build/build.py + +CATKIN_SHELL=bash + +# source setup.sh from same directory as this file +_BUILD_SETUP_DIR=$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" && pwd) +. "$_BUILD_SETUP_DIR/setup.sh" +""") + # Make this file executable + os.chmod(setup_bash_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR) + setup_zsh_path = os.path.join(context.devel_space_abs, 'setup.zsh') + with open(setup_zsh_path, 'w') as f: + f.write("""\ +#!/usr/bin/env zsh +# generated from within catkin_tools/verbs/catkin_build/build.py + +CATKIN_SHELL=zsh + +# source setup.sh from same directory as this file +_BUILD_SETUP_DIR=$(builtin cd -q "`dirname "$0"`" && pwd) +emulate sh # emulate POSIX +. "$_BUILD_SETUP_DIR/setup.sh" +emulate zsh # back to zsh mode +""") + # Make this file executable + os.chmod(setup_zsh_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR) + + +def _create_unmerged_devel_setup_for_install(context): + for path in [os.path.join(context.devel_space_abs, f) for f in ['setup.sh', 'setup.bash', 'setup.zsh']]: + with open(path, 'w') as f: + f.write("""\ +#!/usr/bin/env sh +# generated from within catkin_tools/verbs/catkin_build/build.py + +echo "Error: This workspace was built with the '--install' option." +echo " You should source the setup files in the install space instead." +echo " Your environment has not been changed." +""")
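The leaf-package selection in _create_unmerged_devel_setup above is easiest to see on a toy dependency graph; a standalone sketch with hypothetical package names:

# A "leaf" is a workspace package that no other workspace package depends on.
packages = {            # name -> direct depends (toy data, not real packages)
    'util': [],
    'driver': ['util'],
    'app': ['driver'],
}
dependencies = {dep for deps in packages.values() for dep in deps}
leaf_packages = [name for name in packages if name not in dependencies]
print(leaf_packages)  # ['app'] -- only app's setup.sh is sourced directly;
                      # its recursive run depends are then sourced with --extend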
+""") diff --git a/catkin_tools/verbs/catkin_build/cli.py b/catkin_tools/verbs/catkin_build/cli.py index 7af919ff..c64af063 100644 --- a/catkin_tools/verbs/catkin_build/cli.py +++ b/catkin_tools/verbs/catkin_build/cli.py @@ -15,9 +15,20 @@ from __future__ import print_function import argparse +import os import sys import time +try: + from catkin_pkg.packages import find_packages + from catkin_pkg.topological_order import topological_order_packages +except ImportError as e: + sys.exit( + 'ImportError: "from catkin_pkg.topological_order import ' + 'topological_order" failed: %s\nMake sure that you have installed ' + '"catkin_pkg", and that it is up to date and on the PYTHONPATH.' % e + ) + from catkin_pkg.package import InvalidPackage from catkin_pkg.tool_detection import get_previous_tool_used_on_the_space from catkin_pkg.tool_detection import mark_space_as_built_by @@ -26,25 +37,26 @@ from catkin_tools.argument_parsing import add_cmake_and_make_and_catkin_make_args from catkin_tools.argument_parsing import configure_make_args -from catkin_tools.common import format_time_delta from catkin_tools.common import getcwd +from catkin_tools.common import is_tty from catkin_tools.common import log from catkin_tools.common import find_enclosing_package from catkin_tools.context import Context -from catkin_tools.make_jobserver import set_jobserver_max_mem +import catkin_tools.execution.job_server as job_server -from catkin_tools.metadata import find_enclosing_workspace +from catkin_tools.jobs.job import get_build_type +from catkin_tools.metadata import find_enclosing_workspace from catkin_tools.metadata import get_metadata from catkin_tools.metadata import update_metadata from catkin_tools.resultspace import load_resultspace_environment -from .color import clr +from catkin_tools.terminal_color import set_color -from .common import get_build_type +from .color import clr from .build import build_isolated_workspace from .build import determine_packages_to_be_built @@ -52,7 +64,7 @@ from .build import verify_start_with_option # -# Hack +# Begin Hack # # TODO(wjwwood): remove this, once it is no longer needed. 
@@ -105,6 +117,9 @@ def prepare_arguments(parser):
         help='Build the package containing the current working directory.')
     add('--no-deps', action='store_true', default=False,
         help='Only build specified packages, not their dependencies.')
+    add('--unbuilt', action='store_true', default=False,
+        help='Build packages which have yet to be built.')
+
     start_with_group = pkg_group.add_mutually_exclusive_group()
     add = start_with_group.add_argument
     add('--start-with', metavar='PKGNAME', type=str,
@@ -121,6 +136,8 @@
     add = build_group.add_argument
     add('--force-cmake', action='store_true', default=None,
         help='Runs cmake explicitly for each catkin package.')
+    add('--pre-clean', action='store_true', default=None,
+        help='Runs `make clean` before building each package.')
     add('--no-install-lock', action='store_true', default=None,
         help='Prevents serialization of the install steps, which is on by default to prevent file install collisions')
@@ -135,14 +152,12 @@
     add = behavior_group.add_argument
     add('--verbose', '-v', action='store_true', default=False,
         help='Print output from commands in ordered blocks once the command finishes.')
-    add('--interleave-output', '-i', action='store_true', default=False,
-        help='Prevents ordering of command output when multiple commands are running at the same time.')
     add('--no-status', action='store_true', default=False,
         help='Suppresses status line, useful in situations where carriage return is not properly supported.')
     add('--summarize', '--summary', '-s', action='store_true', default=None,
         help='Adds a build summary to the end of a build; defaults to on with --continue-on-failure, off otherwise')
     add('--no-summarize', '--no-summary', action='store_false', dest='summarize',
-        help='explicitly disable the end of build summary')
+        help='Explicitly disable the end of build summary')
     add('--override-build-tool-check', action='store_true', default=False,
         help='Use to override failure due to using different build tools on the same workspace.')
@@ -159,9 +174,9 @@ def status_rate_type(rate):
             raise argparse.ArgumentTypeError("must be greater than or equal to zero.")
         return rate

-    add('--limit-status-rate', '--status-rate', type=status_rate_type, default=0.0,
+    add('--limit-status-rate', '--status-rate', type=status_rate_type, default=10.0,
         help='Limit the update rate of the status bar to this frequency. Zero means unlimited. '
-             'Must be positive, default is 0.')
+             'Must be non-negative; default is 10 Hz.')
     add('--no-notify', action='store_true', default=False,
         help='Suppresses system pop-up notification.')
@@ -171,8 +186,12 @@ def status_rate_type(rate):

 def dry_run(context, packages, no_deps, start_with):
     # Print Summary
     log(context.summary())
+    # Get all the packages in the context source space
+    # Suppress warnings since this is a utility function
+    workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])
     # Find list of packages in the workspace
-    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(packages, context)
+    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
+        packages, context, workspace_packages)
     # Assert start_with package is in the workspace
     verify_start_with_option(start_with, packages, all_packages, packages_to_be_built + packages_to_be_built_deps)
     if not no_deps:
@@ -196,6 +215,13 @@ def dry_run(context, packages, no_deps, start_with):

 def main(opts):
+
+    # Set color options
+    if (opts.force_color or is_tty(sys.stdout)) and not opts.no_color:
+        set_color(True)
+    else:
+        set_color(False)
+
     # Context-aware args
     if opts.build_this or opts.start_with_this:
         # Determine the enclosing package
@@ -216,17 +242,19 @@ def main(opts):
         if this_package:
             opts.packages += [this_package]
         else:
-            sys.exit("catkin build: --this was specified, but this directory is not in a catkin package.")
+            sys.exit(
+                "[build] Error: In order to use --this, the current directory must be part of a catkin package.")

     # If --start-with was used without any packages and --this was specified, start with this package
     if opts.start_with_this:
         if this_package:
             opts.start_with = this_package
         else:
-            sys.exit("catkin build: --this was specified, but this directory is not in a catkin package.")
+            sys.exit(
+                "[build] Error: In order to use --this, the current directory must be part of a catkin package.")

-    if opts.no_deps and not opts.packages:
-        sys.exit("With --no-deps, you must specify packages to build.")
+    if opts.no_deps and not opts.packages and not opts.unbuilt:
+        sys.exit(clr("[build] @!@{rf}Error:@| With --no-deps, you must specify packages to build."))

     # Load the context
     ctx = Context.load(opts.workspace, opts.profile, opts, append=True)
@@ -244,7 +272,7 @@ def main(opts):
             log("Could not import psutil, but psutil is required when using --mem-limit.")
             log("Please either install psutil or avoid using --mem-limit.")
             sys.exit("Exception: {0}".format(exc))
-        set_jobserver_max_mem(opts.mem_limit)
+        job_server.set_max_mem(opts.mem_limit)

     ctx.make_args = make_args
@@ -253,13 +281,13 @@ def main(opts):
         try:
             load_resultspace_environment(ctx.extend_path)
         except IOError as exc:
-            log(clr("@!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" %
+            log(clr("[build] @!@{rf}Error:@| Unable to extend workspace from \"%s\": %s" %
                     (ctx.extend_path, exc.message)))
             return 1

     # Check if the context is valid before writing any metadata
     if not ctx.source_space_exists():
-        print("catkin build: error: Unable to find source space `%s`" % ctx.source_space_abs)
+        print(clr("[build] @!@{rf}Error:@| Unable to find source space `%s`") % ctx.source_space_abs)
         return 1

     # ensure the build space was previously built by catkin_tools
@@ -315,23 +343,32 @@ def main(opts):
     Context.save(ctx)

     start = time.time()
+
+    # Get parallel toplevel jobs
     try:
-        return build_isolated_workspace(
-            ctx,
-            packages=opts.packages,
-            start_with=opts.start_with,
-            no_deps=opts.no_deps,
-            jobs=opts.parallel_jobs,
-            force_cmake=opts.force_cmake,
-            force_color=opts.force_color,
-            quiet=not opts.verbose,
-            interleave_output=opts.interleave_output,
-            no_status=opts.no_status,
-            limit_status_rate=opts.limit_status_rate,
-            lock_install=not opts.no_install_lock,
-            no_notify=opts.no_notify,
-            continue_on_failure=opts.continue_on_failure,
-            summarize_build=opts.summarize  # Can be True, False, or None
-        )
-    finally:
-        log("[build] Runtime: {0}".format(format_time_delta(time.time() - start)))
+        parallel_jobs = int(opts.parallel_jobs)
+    except TypeError:
+        parallel_jobs = None
+
+    # Set VERBOSE environment variable
+    if opts.verbose:
+        os.environ['VERBOSE'] = '1'
+
+    return build_isolated_workspace(
+        ctx,
+        packages=opts.packages,
+        start_with=opts.start_with,
+        no_deps=opts.no_deps,
+        unbuilt=opts.unbuilt,
+        n_jobs=parallel_jobs,
+        force_cmake=opts.force_cmake,
+        pre_clean=opts.pre_clean,
+        force_color=opts.force_color,
+        quiet=not opts.verbose,
+        no_status=opts.no_status,
+        limit_status_rate=opts.limit_status_rate,
+        lock_install=not opts.no_install_lock,
+        no_notify=opts.no_notify,
+        continue_on_failure=opts.continue_on_failure,
+        summarize_build=opts.summarize  # Can be True, False, or None
+    )
diff --git a/catkin_tools/verbs/catkin_build/color.py b/catkin_tools/verbs/catkin_build/color.py
index 270035fb..60382031 100644
--- a/catkin_tools/verbs/catkin_build/color.py
+++ b/catkin_tools/verbs/catkin_build/color.py
@@ -67,34 +67,3 @@
 color_mapper = ColorMapper(_color_translation_map)

 clr = color_mapper.clr
-
-
-def colorize_cmake(line):
-    """Colorizes output from CMake
-
-    :param line: one, new line terminated, line from `cmake` which needs coloring.
-    :type line: str
-    """
-    cline = sanitize(line)
-    if line.startswith('-- '):
-        cline = '@{cf}-- @|' + cline[len('-- '):]
-        if ':' in cline:
-            split_cline = cline.split(':')
-            cline = split_cline[0] + ':@{yf}' + ':'.join(split_cline[1:]) + '@|'
-    if line.lower().startswith('warning'):
-        # WARNING
-        cline = fmt('@{yf}') + cline
-    if line.startswith('CMake Warning'):
-        # CMake Warning...
-        cline = cline.replace('CMake Warning', '@{yf}@!CMake Warning@|')
-    if line.startswith('ERROR:'):
-        # ERROR:
-        cline = cline.replace('ERROR:', '@!@{rf}ERROR:@|')
-    if line.startswith('CMake Error'):
-        # CMake Error...
-        cline = cline.replace('CMake Error', '@{rf}@!CMake Error@|')
-    if line.startswith('Call Stack (most recent call first):'):
-        # CMake Call Stack
-        cline = cline.replace('Call Stack (most recent call first):',
-                              '@{cf}@_Call Stack (most recent call first):@|')
-    return fmt(cline)
diff --git a/catkin_tools/verbs/catkin_build/common.py b/catkin_tools/verbs/catkin_build/common.py
deleted file mode 100644
index eef49d28..00000000
--- a/catkin_tools/verbs/catkin_build/common.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2014 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import os
-import stat
-import sys
-
-# Due to portability issues, it uses only POSIX-compliant shell features.
-# This means that there is no support for BASH-like arrays, and special
-# care needs to be taken in order to preserve argument atomicity when
-# passing along to the `exec` instruction at the end.
-#
-# This involves forming a string called `_ARGS` which is composed of
-# tokens like `"$_Ai"` for i=0..N-1 for N arguments so that with N=3
-# arguments, for example, `_ARGS` would look like `"$_A0" "$_A1" "$_A2"`.
-# The double-quotes are necessary because they define the argument
-# boundaries when the variables are expanded by calling `eval`.
-
-env_file_template = """\
-#!/usr/bin/env sh
-# generated from within catkin_tools/verbs/catkin_build/common.py
-
-if [ $# -eq 0 ] ; then
-  /bin/echo "Usage: build_env.sh COMMANDS"
-  /bin/echo "Calling build_env.sh without arguments is not supported anymore."
-  /bin/echo "Instead spawn a subshell and source a setup file manually."
-  exit 1
-fi
-
-# save original args for later
-_ARGS=
-_ARGI=0
-for arg in "$@"; do
-  # Define placeholder variable
-  eval "_A$_ARGI=\$arg"
-  # Add placeholder variable to arg list
-  _ARGS="$_ARGS \\"\$_A$_ARGI\\""
-  # Increment arg index
-  _ARGI=`expr $_ARGI + 1`
-
-  #######################
-  ## Uncomment for debug:
-  #_escaped="$(echo "$arg" | sed -e 's@ @ @g')"
-  #echo "$_escaped"
-  #eval "echo '$_ARGI \$_A$_ARGI'"
-  #######################
-done
-
-#######################
-## Uncomment for debug:
-#echo "exec args:"
-#echo "$_ARGS"
-#for arg in $_ARGS; do eval echo $arg; done
-#echo "-----------"
-#####################
-
-# remove all passed in args, resetting $@, $*, $#, $n
-shift $#
-# set the args for the sourced scripts
-set -- $@ "--extend"
-# source setup.sh with implicit --extend argument for each direct build depend in the workspace
-{sources}
-
-# execute given args
-eval exec $_ARGS
-"""
-
-
-def generate_env_file(sources, env_file_path):
-    env_file = env_file_template.format(sources='\n'.join(sources))
-    with open(env_file_path, 'w') as f:
-        f.write(env_file)
-    # Make this file executable
-    os.chmod(env_file_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
-    return env_file_path
-
-
-def create_build_space(buildspace, package_name):
-    """Creates a build space, if it does not already exist, in the build space
-
-    :param buildspace: folder in which packages are built
-    :type buildspace: str
-    :param package_name: name of the package this build space is for
-    :type package_name: str
-    :returns: package specific build directory
-    :rtype: str
-    """
-    package_build_dir = os.path.join(buildspace, package_name)
-    if not os.path.exists(package_build_dir):
-        os.makedirs(package_build_dir)
-    return package_build_dir
-
-
-def get_build_type(package):
-    """Returns the build type for a given package
-
-    :param package: package object
-    :type package: :py:class:`catkin_pkg.package.Package`
-    :returns: build type of the package, e.g. 'catkin' or 'cmake'
-    :rtype: str
-    """
-    export_tags = [e.tagname for e in package.exports]
-    if 'build_type' in export_tags:
-        build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
-    else:
-        build_type_tag = 'catkin'
-    return build_type_tag
-
-
-def get_python_install_dir():
-    """Returns the same value as the CMake variable PYTHON_INSTALL_DIR
-
-    The PYTHON_INSTALL_DIR variable is normally set from the CMake file:
-
-        catkin/cmake/python.cmake
-
-    :returns: Python install directory for the system Python
-    :rtype: str
-    """
-    python_install_dir = 'lib'
-    if os.name != 'nt':
-        python_version_xdoty = str(sys.version_info[0]) + '.' + str(sys.version_info[1])
-        python_install_dir = os.path.join(python_install_dir, 'python' + python_version_xdoty)
-
-    python_use_debian_layout = os.path.exists('/etc/debian_version')
-    python_packages_dir = 'dist-packages' if python_use_debian_layout else 'site-packages'
-    python_install_dir = os.path.join(python_install_dir, python_packages_dir)
-    return python_install_dir
diff --git a/catkin_tools/verbs/catkin_build/executor.py b/catkin_tools/verbs/catkin_build/executor.py
deleted file mode 100644
index ec41b44c..00000000
--- a/catkin_tools/verbs/catkin_build/executor.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2014 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Executor implementation, these objects create threads and process jobs in them"""
-
-from __future__ import print_function
-
-from threading import Thread
-
-from .color import colorize_cmake
-
-from catkin_tools.common import remove_ansi_escape
-from catkin_tools.runner import run_command
-
-from catkin_tools.make_jobserver import jobserver_job
-
-
-class ExecutorEvent(object):
-
-    """This is returned by the Executor when an event occurs
-
-    Events can be jobs starting/finishing, commands starting/failing/finishing,
-    commands producing output (each line is an event), or when the executor
-    quits or failes.
- """ - - def __init__(self, executor_id, event_type, data, package): - self.executor_id = executor_id - self.event_type = event_type - self.data = data - self.package = package - - -class Executor(Thread): - - """Threaded executor for the parallel catkin build jobs""" - name_prefix = 'build' - - def __init__(self, executor_id, context, comm_queue, job_queue, install_lock, continue_on_failure=False): - super(Executor, self).__init__() - self.name = self.name_prefix + '-' + str(executor_id + 1) - self.executor_id = executor_id - self.c = context - self.queue = comm_queue - self.jobs = job_queue - self.current_job = None - self.install_space_lock = install_lock - self.shutdown_on_failure = not continue_on_failure - self.should_shutdown = False - - def job_started(self, job): - self.queue.put(ExecutorEvent(self.executor_id, 'job_started', {}, job.package.name)) - - def command_started(self, cmd, location): - package_name = '' if self.current_job is None else self.current_job.package.name - data = { - 'cmd': cmd, - 'location': location - } - self.queue.put(ExecutorEvent(self.executor_id, 'command_started', data, package_name)) - - def command_log(self, msg): - package_name = '' if self.current_job is None else self.current_job.package.name - data = {'message': msg} - self.queue.put(ExecutorEvent(self.executor_id, 'command_log', data, package_name)) - - def command_failed(self, cmd, location, retcode): - package_name = '' if self.current_job is None else self.current_job.package.name - data = { - 'cmd': cmd, - 'location': location, - 'retcode': retcode - } - self.queue.put(ExecutorEvent(self.executor_id, 'command_failed', data, package_name)) - - def command_finished(self, cmd, location, retcode): - package_name = '' if self.current_job is None else self.current_job.package.name - data = { - 'cmd': cmd, - 'location': location, - 'retcode': retcode - } - self.queue.put(ExecutorEvent(self.executor_id, 'command_finished', data, package_name)) - - def job_finished(self, job): - self.queue.put(ExecutorEvent(self.executor_id, 'job_finished', {}, job.package.name)) - - def job_failed(self, job): - self.queue.put(ExecutorEvent(self.executor_id, 'job_failed', {}, job.package.name)) - - def quit(self, exc=None): - package_name = '' if self.current_job is None else self.current_job.package.name - data = { - 'reason': 'normal' if exc is None else 'exception', - 'exc': str(exc) - } - self.queue.put(ExecutorEvent(self.executor_id, 'exit', data, package_name)) - - def run(self): - try: - # Until exit - while True: - # Get a job off the queue - self.current_job = self.jobs.get() - # If the job is None, then we should shutdown - if self.current_job is None: - # Notify shutdown - self.quit() - break - # Notify that a new job was started - self.job_started(self.current_job) - - # Track if the job has failed - job_has_failed = False - - # Execute each command in the job - with jobserver_job(): - # Check here for externally set shutdown condition. - # This prevents trailing jobs when using the job server. 
-                    if self.should_shutdown:
-                        self.quit()
-                        return
-                    for command in self.current_job:
-                        install_space_locked = False
-                        if command.lock_install_space:
-                            self.install_space_lock.acquire()
-                            install_space_locked = True
-                        try:
-                            # don't run further commands if previous one of this job failed
-                            if job_has_failed:
-                                break
-                            # Log that the command being run
-                            self.command_started(command, command.location)
-                            # Receive lines from the running command
-                            for line in run_command(command.cmd, cwd=command.location):
-                                # If it is an integer, it corresponds to the command's return code
-                                if isinstance(line, int):
-                                    retcode = line
-                                    # If the return code is not zero
-                                    if retcode != 0:
-                                        # Log the failure (the build loop will dispatch None's)
-                                        self.command_failed(command, command.location, retcode)
-                                        job_has_failed = True
-                                        break
-                                    else:
-                                        self.command_finished(command, command.location, retcode)
-                                else:
-                                    # Otherwise it is some sort of string data
-                                    # Ensure that the data is not just ansi escape characters
-                                    if remove_ansi_escape(line).strip():
-                                        for sub_line in line.splitlines(True):  # keepends=True
-                                            if sub_line:
-                                                if command.stage_name == 'cmake':
-                                                    sub_line = colorize_cmake(sub_line)
-                                                self.command_log(sub_line)
-                        finally:
-                            if install_space_locked:
-                                self.install_space_lock.release()
-
-                # Check if the job has failed
-                if job_has_failed:
-                    self.job_failed(self.current_job)
-                    if self.shutdown_on_failure:
-                        # Try to consume and throw away any and all remaining jobs in the queue
-                        while self.jobs.get() is not None:
-                            pass
-                        # Once we get our None, quit
-                        self.quit()
-                        return
-                else:
-                    self.job_finished(self.current_job)
-
-        except KeyboardInterrupt:
-            self.quit()
-        except Exception as exc:
-            import traceback
-            self.quit(traceback.format_exc() + str(exc))
-            raise
diff --git a/catkin_tools/verbs/catkin_build/job.py b/catkin_tools/verbs/catkin_build/job.py
deleted file mode 100644
index 1fcb3b52..00000000
--- a/catkin_tools/verbs/catkin_build/job.py
+++ /dev/null
@@ -1,340 +0,0 @@
-# Copyright 2014 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import os
-import subprocess
-import sys
-import tempfile
-
-from catkin_tools.argument_parsing import handle_make_arguments
-from catkin_tools.common import get_cached_recursive_build_depends_in_workspace
-from catkin_tools.utils import which
-
-from .common import create_build_space
-from .common import generate_env_file
-from .common import get_python_install_dir
-
-# FileNotFoundError is only defined in Python3, but IOError can be used.
-try:
-    FileNotFoundError
-except NameError:
-    FileNotFoundError = IOError
-
-MAKE_EXEC = which('make')
-CMAKE_EXEC = which('cmake')
-
-
-class Command(object):
-
-    """Single command which is part of a job"""
-    lock_install_space = False
-    stage_name = ''
-
-    def __init__(self, env_loader, cmd, location):
-        self.cmd = [env_loader] + cmd
-        self.cmd_str = ' '.join(self.cmd)
-        self.executable = os.path.basename(cmd[0])
-        self.pretty = ' '.join([self.executable] + cmd[1:])
-        self.plain_cmd = cmd
-        self.plain_cmd_str = ' '.join(self.plain_cmd)
-        self.env_loader = env_loader
-        self.location = location
-
-
-class MakeCommand(Command):
-    stage_name = 'make'
-
-    def __init__(self, env_loader, cmd, location):
-        super(MakeCommand, self).__init__(env_loader, cmd, location)
-
-        if MAKE_EXEC is None:
-            raise RuntimeError("Executable 'make' could not be found in PATH.")
-
-
-class CMakeCommand(Command):
-    stage_name = 'cmake'
-
-    def __init__(self, env_loader, cmd, location):
-        super(CMakeCommand, self).__init__(env_loader, cmd, location)
-
-        if CMAKE_EXEC is None:
-            raise RuntimeError("Executable 'cmake' could not be found in PATH.")
-
-
-class InstallCommand(MakeCommand):
-
-    """Command which touches the install space"""
-    lock_install_space = True
-    stage_name = 'make install'
-
-    def __init__(self, env_loader, cmd, location):
-        super(InstallCommand, self).__init__(env_loader, cmd, location)
-
-
-class Job(object):
-
-    """Encapsulates a job which builds a package"""
-
-    def __init__(self, package, package_path, context, force_cmake):
-        self.package = package
-        self.package_path = package_path
-        self.context = context
-        self.force_cmake = force_cmake
-        self.commands = []
-        self.__command_index = 0
-
-    def get_commands(self):
-        raise NotImplementedError('get_commands')
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return self.next()
-
-    def next(self):
-        if self.__command_index >= len(self.commands):
-            raise StopIteration()
-        self.__command_index += 1
-        return self.commands[self.__command_index - 1]
-
-
-def create_env_file(package, context):
-    # Exporting _CATKIN_SETUP_DIR supports DESTDIR, where the absolute location of the installspace
-    # at build time does not match the intended final location of it.
-    source_snippet = '_CATKIN_SETUP_DIR={space_path} . "{source_path}"'
-    sources = []
-    # If installing to isolated folders or not installing, but devel spaces are not merged
-    if (context.install and context.isolate_install) or (not context.install and context.isolate_devel):
-        # Source each package's install or devel space
-        space = context.install_space_abs if context.install else context.devel_space_abs
-        # Get the recursive dependencies
-        depends = get_cached_recursive_build_depends_in_workspace(package, context.packages)
-        # For each dep add a line to source its setup file
-        for dep_pth, dep in depends:
-            space_path = os.path.join(space, dep.name)
-            source_path = os.path.join(space_path, 'setup.sh')
-            sources.append(source_snippet.format(space_path=space_path, source_path=source_path))
-    else:
-        # Just source common install or devel space
-        if context.install:
-            if context.destdir:
-                # Using `os.path.join` is explicitly avoided here, since
-                # `context.install_space_abs` is always absolute and therefore `context.destdir`
-                # would always be dropped by `os.path.join`.
-                space_path = context.destdir + context.install_space_abs
-            else:
-                space_path = context.install_space_abs
-        else:
-            space_path = context.devel_space_abs
-
-        source_path = os.path.join(space_path, "setup.sh")
-        if os.path.exists(source_path):
-            sources.append(source_snippet.format(space_path=space_path, source_path=source_path))
-
-    # Build the env_file
-    env_file_path = os.path.abspath(os.path.join(context.build_space_abs, package.name, 'build_env.sh'))
-    generate_env_file(sources, env_file_path)
-    return env_file_path
-
-
-# TODO: Move various Job types out to another file
-
-class CMakeJob(Job):
-
-    """Job class for building plain cmake packages"""
-
-    def __init__(self, package, package_path, context, force_cmake):
-        Job.__init__(self, package, package_path, context, force_cmake)
-        self.commands = self.get_commands()
-
-    def get_multiarch(self):
-        if not sys.platform.lower().startswith('linux'):
-            return ''
-        # this function returns the suffix for lib directories on supported systems or an empty string
-        # it uses two step approach to look for multiarch: first run gcc -print-multiarch and if
-        # failed try to run dpkg-architecture
-        error_thrown = False
-        try:
-            p = subprocess.Popen(
-                ['gcc', '-print-multiarch'],
-                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            out, err = p.communicate()
-        except (OSError, FileNotFoundError):
-            error_thrown = True
-        if error_thrown or p.returncode != 0:
-            try:
-                out, err = subprocess.Popen(
-                    ['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
-                    stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-            except (OSError, FileNotFoundError):
-                return ''
-        # be sure to return empty string or a valid multiarch tuple
-        decoded = out.decode().strip()
-        assert(not decoded or decoded.count('-') == 2)
-        return decoded
-
-    def get_commands(self):
-        commands = []
-        # Setup build variables
-        pkg_dir = os.path.join(self.context.source_space_abs, self.package_path)
-        build_space = create_build_space(self.context.build_space_abs, self.package.name)
-        if self.context.isolate_devel:
-            devel_space = os.path.join(self.context.devel_space_abs, self.package.name)
-        else:
-            devel_space = self.context.devel_space_abs
-        if self.context.isolate_install:
-            install_space = os.path.join(self.context.install_space_abs, self.package.name)
-        else:
-            install_space = self.context.install_space_abs
-        install_target = install_space if self.context.install else devel_space
-        # Create an environment file
-        env_cmd = create_env_file(self.package, self.context)
-        # CMake command
-        makefile_path = os.path.join(build_space, 'Makefile')
-        if not os.path.isfile(makefile_path) or self.force_cmake:
-            commands.append(CMakeCommand(
-                env_cmd,
-                [
-                    CMAKE_EXEC,
-                    pkg_dir,
-                    '-DCMAKE_INSTALL_PREFIX=' + install_target
-                ] + self.context.cmake_args,
-                build_space
-            ))
-            commands[-1].cmd.extend(self.context.cmake_args)
-        else:
-            commands.append(MakeCommand(env_cmd, [MAKE_EXEC, 'cmake_check_build_system'], build_space))
-        # Make command
-        commands.append(MakeCommand(
-            env_cmd,
-            [MAKE_EXEC] + handle_make_arguments(self.context.make_args),
-            build_space
-        ))
-        # Make install command (always run on plain cmake)
-        commands.append(InstallCommand(env_cmd, [MAKE_EXEC, 'install'], build_space))
-        # Determine the location of where the setup.sh file should be created
-        if self.context.install:
-            setup_file_path = os.path.join(install_space, 'setup.sh')
-            if not self.context.isolate_install and os.path.exists(setup_file_path):
-                return commands
-        else:  # Create it in the devel space
-            setup_file_path = os.path.join(devel_space, 'setup.sh')
-            if not self.context.isolate_devel and os.path.exists(setup_file_path):
-                # Do not replace existing setup.sh if devel space is merged
-                return commands
-        # Create the setup file other packages will source when depending on this package
-        arch = self.get_multiarch()
-        subs = {}
-        subs['cmake_prefix_path'] = install_target + ":"
-        subs['ld_path'] = os.path.join(install_target, 'lib') + ":"
-        pythonpath = os.path.join(install_target, get_python_install_dir())
-        subs['pythonpath'] = pythonpath + ':'
-        subs['pkgcfg_path'] = os.path.join(install_target, 'lib', 'pkgconfig') + ":"
-        subs['path'] = os.path.join(install_target, 'bin') + ":"
-        if arch:
-            subs['ld_path'] += os.path.join(install_target, 'lib', arch) + ":"
-            subs['pkgcfg_path'] += os.path.join(install_target, 'lib', arch, 'pkgconfig') + ":"
-        setup_file_directory = os.path.dirname(setup_file_path)
-        if not os.path.exists(setup_file_directory):
-            os.makedirs(setup_file_directory)
-        # Create a temporary file in the setup_file_directory, so os.rename cannot fail
-        tmp_dst_handle, tmp_dst_path = tempfile.mkstemp(
-            dir=setup_file_directory,
-            prefix=os.path.basename(setup_file_path) + '.')
-        # Write the fulfilled template to the file
-        data = """\
-#!/usr/bin/env sh
-# generated from catkin_tools.verbs.catkin_build.job python module
-
-# remember type of shell if not already set
-if [ -z "$CATKIN_SHELL" ]; then
-  CATKIN_SHELL=sh
-fi
-
-# detect if running on Darwin platform
-_UNAME=`uname -s`
-IS_DARWIN=0
-if [ "$_UNAME" = "Darwin" ]; then
-  IS_DARWIN=1
-fi
-
-# Prepend to the environment
-export CMAKE_PREFIX_PATH="{cmake_prefix_path}$CMAKE_PREFIX_PATH"
-if [ $IS_DARWIN -eq 0 ]; then
-  export LD_LIBRARY_PATH="{ld_path}$LD_LIBRARY_PATH"
-else
-  export DYLD_LIBRARY_PATH="{ld_path}$DYLD_LIBRARY_PATH"
-fi
-export PATH="{path}$PATH"
-export PKG_CONFIG_PATH="{pkgcfg_path}$PKG_CONFIG_PATH"
-export PYTHONPATH="{pythonpath}$PYTHONPATH"
-""".format(**subs)
-        os.write(tmp_dst_handle, data.encode('utf-8'))
-        os.close(tmp_dst_handle)
-        # Do an atomic rename with os.rename
-        os.rename(tmp_dst_path, setup_file_path)
-        return commands
-
-
-class CatkinJob(Job):
-
-    """Job class for building catkin packages"""
-
-    def __init__(self, package, package_path, context, force_cmake):
-        Job.__init__(self, package, package_path, context, force_cmake)
-        self.commands = self.get_commands()
-
-    def get_commands(self):
-        commands = []
-        # Setup build variables
-        pkg_dir = os.path.join(self.context.source_space_abs, self.package_path)
-        build_space = create_build_space(self.context.build_space_abs, self.package.name)
-        if self.context.isolate_devel:
-            devel_space = os.path.join(self.context.devel_space_abs, self.package.name)
-        else:
-            devel_space = self.context.devel_space_abs
-        if self.context.isolate_install:
-            install_space = os.path.join(self.context.install_space_abs, self.package.name)
-        else:
-            install_space = self.context.install_space_abs
-        # Create an environment file
-        env_cmd = create_env_file(self.package, self.context)
-        # CMake command
-        makefile_path = os.path.join(build_space, 'Makefile')
-        if not os.path.isfile(makefile_path) or self.force_cmake:
-            commands.append(CMakeCommand(
-                env_cmd,
-                [
-                    CMAKE_EXEC,
-                    pkg_dir,
-                    '-DCATKIN_DEVEL_PREFIX=' + devel_space,
-                    '-DCMAKE_INSTALL_PREFIX=' + install_space
-                ] + self.context.cmake_args,
-                build_space
-            ))
-        else:
-            commands.append(MakeCommand(env_cmd, [MAKE_EXEC, 'cmake_check_build_system'], build_space))
-        # Make command
-        commands.append(MakeCommand(
-            env_cmd,
-            [MAKE_EXEC] + handle_make_arguments(self.context.make_args + self.context.catkin_make_args),
-            build_space
-        ))
-        # Make install command, if installing
-        if self.context.install:
-            commands.append(InstallCommand(env_cmd, [MAKE_EXEC, 'install'], build_space))
-        return commands
diff --git a/setup.py b/setup.py
index 8b2485b8..add018b5 100644
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,7 @@ def get_data_files(prefix):


 class PermissiveInstall(install):
+
     def run(self):
         install.run(self)
         if os.name == 'posix':
@@ -127,6 +128,10 @@ def run(self):
             'locate = catkin_tools.verbs.catkin_locate:description',
             'profile = catkin_tools.verbs.catkin_profile:description',
         ],
+        'catkin_tools.jobs': [
+            'catkin = catkin_tools.jobs.catkin:description',
+            'cmake = catkin_tools.jobs.cmake:description',
+        ],
     },
     cmdclass={'install': PermissiveInstall},
 )
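
The 'catkin_tools.jobs' entry point group registered in the setup.py hunk above
is how per-build-type job support (here, catkin and cmake) is discovered at
runtime. A minimal sketch of enumerating such a group with setuptools'
pkg_resources follows; it is illustrative only, and the 'job_descriptions'
name is not part of this patch:

    # Illustration: discover job types registered under 'catkin_tools.jobs'
    from pkg_resources import iter_entry_points

    job_descriptions = {}
    for entry_point in iter_entry_points(group='catkin_tools.jobs'):
        # e.g. 'catkin' -> catkin_tools.jobs.catkin:description
        job_descriptions[entry_point.name] = entry_point.load()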