diff --git a/CMakeLists.txt b/CMakeLists.txt
index 859e4022e..480ae4445 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -74,8 +74,8 @@ rapids_find_package(
rapids_cpm_init()
include(${rapids-cmake-dir}/cpm/rapids_logger.cmake)
-rapids_cpm_rapids_logger()
-rapids_make_logger(rmm EXPORT_SET rmm-exports)
+rapids_cpm_rapids_logger(BUILD_EXPORT_SET rmm-exports INSTALL_EXPORT_SET rmm-exports)
+create_logger_macros(RMM "rmm::default_logger()" include/rmm)
include(cmake/thirdparty/get_cccl.cmake)
include(cmake/thirdparty/get_nvtx.cmake)
@@ -86,8 +86,10 @@ include(cmake/thirdparty/get_nvtx.cmake)
add_library(rmm INTERFACE)
add_library(rmm::rmm ALIAS rmm)
-target_include_directories(rmm INTERFACE "$"
-                                         "$")
+target_include_directories(
+  rmm
+  INTERFACE "$"
+            "$" "$")
if(CUDA_STATIC_RUNTIME)
  message(STATUS "RMM: Enabling static linking of cudart")
@@ -96,10 +98,10 @@ else()
  target_link_libraries(rmm INTERFACE CUDA::cudart)
endif()
-target_link_libraries(rmm INTERFACE rmm_logger)
target_link_libraries(rmm INTERFACE CCCL::CCCL)
target_link_libraries(rmm INTERFACE dl)
target_link_libraries(rmm INTERFACE nvtx3::nvtx3-cpp)
+target_link_libraries(rmm INTERFACE rapids_logger::rapids_logger)
target_compile_features(rmm INTERFACE cxx_std_17 $)
target_compile_definitions(rmm INTERFACE LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE)
diff --git a/README.md b/README.md
index 44c262ac5..54bab2eab 100644
--- a/README.md
+++ b/README.md
@@ -645,17 +645,16 @@ set to `True`.
The log file name can be set using the `log_file_name` parameter.

### Debug Logging

-RMM includes a debug logger which can be enabled to log trace and debug information to a file. This
-information can show when errors occur, when additional memory is allocated from upstream resources,
-etc. By default output is logged to stderr, but the environment variable
-`RMM_DEBUG_LOG_FILE` can be set to specify a path and file name to dump the logs to instead.
+RMM leverages [`rapids-logger`](https://github.com/rapidsai/rapids-logger) to log trace and debug
+information to a file. This information can show when errors occur, when additional memory is
+allocated from upstream resources, etc. By default output is logged to stderr, but the environment
+variable `RMM_DEBUG_LOG_FILE` can be set to specify a path and file name to dump the logs to
+instead.

There is a CMake configuration variable `RMM_LOGGING_LEVEL`, which can be set to enable compilation
of more detailed logging. The default is `INFO`. Available levels are `TRACE`, `DEBUG`, `INFO`,
`WARN`, `ERROR`, `CRITICAL` and `OFF`.

-The log relies on the [spdlog](https://github.com/gabime/spdlog.git) library.
-
Note that to see logging below the `INFO` level, the application must also set the logging level at
run time. C++ applications must call `rmm::default_logger().set_level()`, for example to enable all
levels of logging down to `TRACE`, call `rmm::default_logger().set_level(spdlog::level::trace)` (and compile
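A quick illustration of the runtime step the README hunk above describes, and only a sketch rather than part of the patch: the unchanged README text still names `spdlog::level::trace`, while the rest of this diff uses the `rapids_logger::level_enum` spelling, so the call would look like the hypothetical helper below (the `debug()` member is used elsewhere in this patch; the helper name itself is invented for illustration).

    #include <rmm/logger.hpp>

    void enable_verbose_rmm_logging()
    {
      // Lower the runtime threshold to TRACE; messages below INFO additionally
      // require RMM to have been compiled with a low enough RMM_LOGGING_LEVEL.
      rmm::default_logger().set_level(rapids_logger::level_enum::trace);
      rmm::default_logger().debug("verbose RMM logging enabled");
    }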
diff --git a/benchmarks/CMakeLists.txt b/benchmarks/CMakeLists.txt
index b3b60cfcb..adf24a969 100644
--- a/benchmarks/CMakeLists.txt
+++ b/benchmarks/CMakeLists.txt
@@ -1,5 +1,5 @@
# =============================================================================
-# Copyright (c) 2018-2024, NVIDIA CORPORATION.
+# Copyright (c) 2018-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
@@ -45,8 +45,6 @@ function(ConfigureBench BENCH_NAME)
  target_compile_options(${BENCH_NAME} PUBLIC $<$:-Wall -Werror -Wno-unknown-pragmas>)
-  target_link_libraries(${BENCH_NAME} PRIVATE rmm_bench_logger)
-
  if(DISABLE_DEPRECATION_WARNING)
    target_compile_options(
      ${BENCH_NAME} PUBLIC $<$:-Xcompiler=-Wno-deprecated-declarations>)
@@ -61,10 +59,6 @@ function(ConfigureBench BENCH_NAME)
                         EXCLUDE_FROM_ALL)
endfunction(ConfigureBench)
-# Create an object library for the logger so that we don't have to recompile it.
-add_library(rmm_bench_logger OBJECT)
-target_link_libraries(rmm_bench_logger PRIVATE rmm_logger_impl)
-
# random allocations benchmark
ConfigureBench(RANDOM_ALLOCATIONS_BENCH random_allocations/random_allocations.cpp)
diff --git a/benchmarks/replay/replay.cpp b/benchmarks/replay/replay.cpp
index 8edbf11f9..c115d9e42 100644
--- a/benchmarks/replay/replay.cpp
+++ b/benchmarks/replay/replay.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -403,7 +403,7 @@ int main(int argc, char** argv)
  auto const num_threads = per_thread_events.size();
  // Uncomment to enable / change default log level
-  // rmm::logger().set_level(rmm::level_enum::trace);
+  // rmm::logger().set_level(rapids_logger::level_enum::trace);
  if (args.count("resource") > 0) {
    std::string mr_name = args["resource"].as();
diff --git a/ci/build_wheel_python.sh b/ci/build_wheel_python.sh
index bbd16bb6a..2a852f47c 100755
--- a/ci/build_wheel_python.sh
+++ b/ci/build_wheel_python.sh
@@ -32,7 +32,10 @@ PIP_CONSTRAINT="${PWD}/build-constraints.txt" \
sccache --show-adv-stats
mkdir -p final_dist
-python -m auditwheel repair -w "${wheel_dir}" dist/*
+EXCLUDE_ARGS=(
+  --exclude "librapids_logger.so"
+)
+python -m auditwheel repair "${EXCLUDE_ARGS[@]}" -w final_dist dist/*
../../ci/validate_wheel.sh "${wheel_dir}"
diff --git a/ci/check_symbols.sh b/ci/check_symbols.sh
index 377a93cac..688862aaf 100755
--- a/ci/check_symbols.sh
+++ b/ci/check_symbols.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright (c) 2024, NVIDIA CORPORATION.
+# Copyright (c) 2024-2025, NVIDIA CORPORATION.
set -eEuo pipefail
@@ -47,17 +47,6 @@ for dso_file in ${dso_files}; do
  echo " * WEAK: $(grep --count -E ' WEAK ' < ${symbol_file})"
  echo " * LOCAL: $(grep --count -E ' LOCAL ' < ${symbol_file})"
-  echo "checking for 'fmt::' symbols..."
-  if grep -E 'fmt\:\:' < "${symbol_file}"; then
-    raise-symbols-found-error 'fmt::'
-  fi
-
-  echo "checking for 'spdlog::' symbols..."
-  if grep -E 'spdlog\:\:' < "${symbol_file}" \
-    | grep -v 'std\:\:_Destroy_aux'
-  then
-    raise-symbols-found-error 'spdlog::'
-  fi
  echo "No symbol visibility issues found"
done
diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh
index 458449443..8b49d17e1 100755
--- a/ci/test_wheel.sh
+++ b/ci/test_wheel.sh
@@ -14,6 +14,7 @@ rapids-generate-pip-constraints test_python ./constraints.txt
rapids-pip-retry install \
    -v \
    --constraint ./constraints.txt \
+    "$(echo "${WHEELHOUSE}"/librmm_"${RAPIDS_PY_CUDA_SUFFIX}"*.whl)" \
    "$(echo "${WHEELHOUSE}"/rmm_"${RAPIDS_PY_CUDA_SUFFIX}"*.whl)[test]"
python -m pytest ./python/rmm/rmm/tests
diff --git a/cmake/thirdparty/get_spdlog.cmake b/cmake/thirdparty/get_spdlog.cmake
deleted file mode 100644
index febdf4c5c..000000000
--- a/cmake/thirdparty/get_spdlog.cmake
+++ /dev/null
@@ -1,33 +0,0 @@
-# =============================================================================
-# Copyright (c) 2021-2024, NVIDIA CORPORATION.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
-# in compliance with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under the License
-# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
-# or implied. See the License for the specific language governing permissions and limitations under
-# the License.
-# =============================================================================
-
-# Use CPM to find or clone speedlog.
-function(find_and_configure_spdlog)
-
-  include(${rapids-cmake-dir}/cpm/spdlog.cmake)
-  rapids_cpm_spdlog(
-    # The conda package for fmt is hard-coded to assume that we use a preexisting fmt library. This
-    # is why we have always had a libfmt linkage despite choosing to specify the header-only version
-    # of fmt. We need a more robust way of modifying this to support fully self-contained build and
-    # usage even in environments where fmt and/or spdlog are already present. The crudest solution
-    # would be to modify the interface compile definitions and link libraries of the spdlog target,
-    # if necessary. For now I'm specifying EXTERNAL_FMT_HO here so that in environments where spdlog
-    # is cloned and built from source we wind up with the behavior that we expect, but we'll have to
-    # resolve this properly eventually.
-    FMT_OPTION "EXTERNAL_FMT_HO"
-    INSTALL_EXPORT_SET rmm-exports
-    BUILD_EXPORT_SET rmm-exports)
-endfunction()
-
-find_and_configure_spdlog()
diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index daba1b604..6dfc4771a 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -16,7 +16,6 @@ dependencies:
- cxx-compiler
- cython>=3.0.0
- doxygen=1.9.1
-- fmt>=11.0.2,<12
- gcc_linux-64=11.*
- gcovr>=5.0
- graphviz
@@ -34,8 +33,8 @@ dependencies:
- pytest-cov
- python>=3.10,<3.13
- rapids-build-backend>=0.3.0,<0.4.0.dev0
+- rapids-logger==0.1.*,>=0.0.0a0
- scikit-build-core >=0.10.0
-- spdlog>=1.14.1,<1.15
- sphinx
- sphinx-copybutton
- sphinx-markdown-tables
diff --git a/conda/environments/all_cuda-128_arch-x86_64.yaml b/conda/environments/all_cuda-128_arch-x86_64.yaml
index e22c9b5c3..c4eddb72a 100644
--- a/conda/environments/all_cuda-128_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-128_arch-x86_64.yaml
@@ -16,7 +16,6 @@ dependencies:
- cxx-compiler
- cython>=3.0.0
- doxygen=1.9.1
-- fmt>=11.0.2,<12
- gcc_linux-64=13.*
- gcovr>=5.0
- graphviz
@@ -33,8 +32,8 @@ dependencies:
- pytest-cov
- python>=3.10,<3.13
- rapids-build-backend>=0.3.0,<0.4.0.dev0
+- rapids-logger==0.1.*,>=0.0.0a0
- scikit-build-core >=0.10.0
-- spdlog>=1.14.1,<1.15
- sphinx
- sphinx-copybutton
- sphinx-markdown-tables
diff --git a/conda/recipes/librmm/conda_build_config.yaml b/conda/recipes/librmm/conda_build_config.yaml
index 2980a8d5c..83f5ebcb1 100644
--- a/conda/recipes/librmm/conda_build_config.yaml
+++ b/conda/recipes/librmm/conda_build_config.yaml
@@ -18,9 +18,3 @@ c_stdlib_version:
cmake_version:
  - ">=3.26.4,!=3.30.0"
-
-fmt_version:
-  - ">=11.0.2,<12"
-
-spdlog_version:
-  - ">=1.14.1,<1.15"
diff --git a/conda/recipes/librmm/meta.yaml b/conda/recipes/librmm/meta.yaml
index 22f53cdc6..f927315c6 100644
--- a/conda/recipes/librmm/meta.yaml
+++ b/conda/recipes/librmm/meta.yaml
@@ -26,8 +26,7 @@ requirements:
    - {{ stdlib("c") }}
  host:
    - cuda-version ={{ cuda_version }}
-    - fmt {{ fmt_version }}
-    - spdlog {{ spdlog_version }}
+    - rapids-logger =0.1
  build:
    script_env:
@@ -68,8 +67,7 @@ outputs:
      {% if cuda_major == "11" %}
      - cudatoolkit
      {% endif %}
-      - fmt {{ fmt_version }}
-      - spdlog {{ spdlog_version }}
+      - rapids-logger =0.1
    test:
      commands:
        - test -d "${PREFIX}/include/rmm"
diff --git a/dependencies.yaml b/dependencies.yaml
index d010a9972..a50d0f884 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -11,6 +11,7 @@ files:
      - checks
      - cuda
      - cuda_version
+      - depends_on_rapids_logger
      - develop
      - docs
      - py_version
@@ -54,6 +55,14 @@ files:
      key: requires
      includes:
        - build
+        - depends_on_rapids_logger
+  py_cpp_run:
+    output: pyproject
+    pyproject_dir: python/librmm
+    extras:
+      table: project
+    includes:
+      - depends_on_rapids_logger
  py_build:
    output: pyproject
    pyproject_dir: python/rmm
@@ -70,6 +79,7 @@
    includes:
      - build
      - cython_build
+      - depends_on_librmm
  py_run:
    output: pyproject
    pyproject_dir: python/rmm
@@ -77,6 +87,7 @@
      table: project
    includes:
      - run
+      - depends_on_librmm
  py_optional_test:
    output: pyproject
    pyproject_dir: python/rmm
@@ -111,8 +122,6 @@ dependencies:
      packages:
        - c-compiler
        - cxx-compiler
-        - fmt>=11.0.2,<12
-        - spdlog>=1.14.1,<1.15
    specific:
      - output_types: conda
        matrices:
@@ -171,21 +180,33 @@ dependencies:
          - matrix:  # All CUDA 11 versions
            packages:
              - &cuda_python11 cuda-python>=11.8.5,<12.0a0
  depends_on_librmm:
    common:
      - output_types: conda
packages: + - &librmm_unsuffixed librmm==25.4.*,>=0.0.0a0 + - output_types: requirements + packages: + # pip recognizes the index as a global option for the requirements.txt file + # This index is needed for librmm-cu{11,12}. + - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple + specific: - output_types: [requirements, pyproject] matrices: - - matrix: - cuda: "12.*" - cuda_suffixed: "true" - packages: - - librmm-cu12==25.4.*,>=0.0.0a0 - - matrix: - cuda: "11.*" - cuda_suffixed: "true" - packages: - - librmm-cu11==25.4.*,>=0.0.0a0 - - matrix: null - packages: - - librmm==25.4.*,>=0.0.0a0 + - matrix: + cuda: "12.*" + cuda_suffixed: "true" + packages: + - librmm-cu12==25.4.*,>=0.0.0a0 + - matrix: + cuda: "11.*" + cuda_suffixed: "true" + packages: + - librmm-cu11==25.4.*,>=0.0.0a0 + - matrix: + packages: + - *librmm_unsuffixed checks: common: - output_types: [conda, requirements] @@ -338,3 +359,14 @@ dependencies: - numpy==1.23.* - matrix: packages: + depends_on_rapids_logger: + common: + - output_types: [conda, requirements, pyproject] + packages: + - rapids-logger==0.1.*,>=0.0.0a0 + - output_types: requirements + packages: + # pip recognizes the index as a global option for the requirements.txt file + # This index is needed for rapids_logger + - --extra-index-url=https://pypi.nvidia.com + - --extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple diff --git a/include/rmm/logger.hpp b/include/rmm/logger.hpp new file mode 100644 index 000000000..acbc69e77 --- /dev/null +++ b/include/rmm/logger.hpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2025, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include + +#include + +namespace RMM_NAMESPACE { + +/** + * @brief Returns the default sink for the global logger. + * + * If the environment variable `RMM_DEBUG_LOG_FILE` is defined, the default sink is a sink to that + * file. Otherwise, the default is to dump to stderr. + * + * @return sink_ptr The sink to use + */ +inline rapids_logger::sink_ptr default_sink() +{ + auto* filename = std::getenv("RMM_DEBUG_LOG_FILE"); + if (filename != nullptr) { + return std::make_shared(filename, true); + } + return std::make_shared(); +} + +/** + * @brief Returns the default log pattern for the global logger. + * + * @return std::string The default log pattern. + */ +inline std::string default_pattern() { return "[%6t][%H:%M:%S:%f][%-6l] %v"; } + +/** + * @brief Get the default logger. 
+ *
+ * @return logger& The default logger
+ */
+inline rapids_logger::logger& default_logger()
+{
+  static rapids_logger::logger logger_ = [] {
+    rapids_logger::logger logger_{"RMM", {default_sink()}};
+    logger_.set_pattern(default_pattern());
+#if RMM_LOG_ACTIVE_LEVEL <= RMM_LOG_LEVEL_DEBUG
+#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
+    logger_.debug("----- RMM LOG [PTDS ENABLED] -----");
+#else
+    logger_.debug("----- RMM LOG [PTDS DISABLED] -----");
+#endif
+#endif
+    return logger_;
+  }();
+  return logger_;
+}
+
+}  // namespace RMM_NAMESPACE
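The new `include/rmm/logger.hpp` above only wires a `rapids_logger::logger` up to RMM's default sink (stderr, or the file named by `RMM_DEBUG_LOG_FILE`) and pattern. A minimal consumer, sketched here for illustration and relying only on the `default_logger()`, `should_log()` and `info()` members referenced elsewhere in this patch (the function name is hypothetical):

    #include <rmm/logger.hpp>

    #include <cstddef>
    #include <string>

    void log_pool_growth(std::size_t bytes)
    {
      // default_logger() lazily constructs the process-wide "RMM" logger on first use.
      auto& log = rmm::default_logger();
      if (log.should_log(rapids_logger::level_enum::info)) {
        log.info("pool grew by " + std::to_string(bytes) + " bytes");
      }
    }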
diff --git a/include/rmm/mr/device/arena_memory_resource.hpp b/include/rmm/mr/device/arena_memory_resource.hpp
index fe07aab04..f8e4e16cb 100644
--- a/include/rmm/mr/device/arena_memory_resource.hpp
+++ b/include/rmm/mr/device/arena_memory_resource.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -96,9 +96,10 @@ class arena_memory_resource final : public device_memory_resource {
    : global_arena_{upstream_mr, arena_size}, dump_log_on_failure_{dump_log_on_failure}
  {
    if (dump_log_on_failure_) {
-      logger_ = std::make_shared("arena_memory_dump", "rmm_arena_memory_dump.log");
+      logger_ =
+        std::make_shared("arena_memory_dump", "rmm_arena_memory_dump.log");
      // Set the level to `debug` for more detailed output.
-      logger_->set_level(level_enum::info);
+      logger_->set_level(rapids_logger::level_enum::info);
    }
  }
@@ -352,7 +353,7 @@ class arena_memory_resource final : public device_memory_resource {
  /// If true, dump memory information to log on allocation failure.
  bool dump_log_on_failure_{};
  /// The logger for memory dump.
-  std::shared_ptr logger_{};
+  std::shared_ptr logger_{};
  /// Mutex for read and write locks on arena maps.
  mutable std::shared_mutex map_mtx_;
  /// Mutex for shared and unique locks on the mr.
diff --git a/include/rmm/mr/device/detail/arena.hpp b/include/rmm/mr/device/detail/arena.hpp
index 20095d504..de09a1a53 100644
--- a/include/rmm/mr/device/detail/arena.hpp
+++ b/include/rmm/mr/device/detail/arena.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -645,7 +645,7 @@ class global_arena final {
   *
   * @param logger the logger to use
   */
-  void dump_memory_log(std::shared_ptr const& logger) const
+  void dump_memory_log(std::shared_ptr const& logger) const
  {
    std::lock_guard lock(mtx_);
diff --git a/include/rmm/mr/device/logging_resource_adaptor.hpp b/include/rmm/mr/device/logging_resource_adaptor.hpp
index 2d2291e3f..4f62d842b 100644
--- a/include/rmm/mr/device/logging_resource_adaptor.hpp
+++ b/include/rmm/mr/device/logging_resource_adaptor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -113,7 +113,7 @@ class logging_resource_adaptor final : public device_memory_resource {
   * performance.
   */
  logging_resource_adaptor(Upstream* upstream,
-                           std::initializer_list sinks,
+                           std::initializer_list sinks,
                           bool auto_flush = false)
    : logging_resource_adaptor{to_device_async_resource_ref_checked(upstream), sinks, auto_flush}
  {
@@ -178,7 +178,7 @@
   * performance.
   */
  logging_resource_adaptor(device_async_resource_ref upstream,
-                           std::initializer_list sinks,
+                           std::initializer_list sinks,
                           bool auto_flush = false)
    : logging_resource_adaptor{make_logger(sinks), upstream, auto_flush}
  {
@@ -232,24 +232,27 @@
  }
 private:
-  static auto make_logger(std::ostream& stream) { return std::make_shared("RMM", stream); }
+  static auto make_logger(std::ostream& stream)
+  {
+    return std::make_shared("RMM", stream);
+  }
  static auto make_logger(std::string const& filename)
  {
-    return std::make_shared("RMM", filename);
+    return std::make_shared("RMM", filename);
  }
-  static auto make_logger(std::initializer_list sinks)
+  static auto make_logger(std::initializer_list sinks)
  {
-    return std::make_shared("RMM", sinks);
+    return std::make_shared("RMM", sinks);
  }
-  logging_resource_adaptor(std::shared_ptr logger,
+  logging_resource_adaptor(std::shared_ptr logger,
                           device_async_resource_ref upstream,
                           bool auto_flush)
    : logger_{logger}, upstream_{upstream}
  {
-    if (auto_flush) { logger_->flush_on(level_enum::info); }
+    if (auto_flush) { logger_->flush_on(rapids_logger::level_enum::info); }
    logger_->set_pattern("%v");
    logger_->info(header());
    logger_->set_pattern("%t,%H:%M:%S.%f,%v");
@@ -328,7 +331,7 @@ class logging_resource_adaptor final : public device_memory_resource {
    return get_upstream_resource() == cast->get_upstream_resource();
  }
-  std::shared_ptr logger_{};
+  std::shared_ptr logger_{};
  device_async_resource_ref upstream_;  ///< The upstream resource used for satisfying
                                        ///< allocation requests
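For the sink-based `logging_resource_adaptor` constructors changed above, a usage sketch, illustrative only and not part of the diff: it reuses `rmm::default_sink()` from the new `rmm/logger.hpp`, although any `rapids_logger::sink_ptr` (such as the file sinks built in the tests further down) would do.

    #include <rmm/logger.hpp>
    #include <rmm/mr/device/cuda_memory_resource.hpp>
    #include <rmm/mr/device/logging_resource_adaptor.hpp>

    int main()
    {
      rmm::mr::cuda_memory_resource upstream;
      // Log each allocation/deallocation through RMM's default sink; auto_flush
      // keeps the log complete even if the program terminates abruptly.
      rmm::mr::logging_resource_adaptor log_mr{&upstream, {rmm::default_sink()}, true};
      void* ptr = log_mr.allocate(256);
      log_mr.deallocate(ptr, 256);
      return 0;
    }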
diff --git a/python/librmm/librmm/__init__.py b/python/librmm/librmm/__init__.py
index b914ecdc3..fac8ca0cb 100644
--- a/python/librmm/librmm/__init__.py
+++ b/python/librmm/librmm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2024, NVIDIA CORPORATION.
+# Copyright (c) 2024-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,3 +13,6 @@
# limitations under the License.
from librmm._version import __git_commit__, __version__
+from librmm.load import load_library
+
+__all__ = ["__git_commit__", "__version__", "load_library"]
diff --git a/python/librmm/librmm/load.py b/python/librmm/librmm/load.py
new file mode 100644
index 000000000..1d32434f8
--- /dev/null
+++ b/python/librmm/librmm/load.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2025, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+def load_library():
+    """Librmm is header-only; this function loads its (non-header-only) dependencies."""
+    try:
+        import rapids_logger
+    except ModuleNotFoundError:
+        pass
+    else:
+        rapids_logger.load_library()
+        del rapids_logger
diff --git a/python/librmm/pyproject.toml b/python/librmm/pyproject.toml
index 6fee31c9a..f7a6f7088 100644
--- a/python/librmm/pyproject.toml
+++ b/python/librmm/pyproject.toml
@@ -36,6 +36,9 @@ classifiers = [
    "Programming Language :: C++",
    "Environment :: GPU :: NVIDIA CUDA",
]
+dependencies = [
+    "rapids-logger==0.1.*,>=0.0.0a0",
+]  # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/rmm"
@@ -50,6 +53,7 @@ matrix-entry = "cuda_suffixed=true"
requires = [
    "cmake>=3.26.4,!=3.30.0",
    "ninja",
+    "rapids-logger==0.1.*,>=0.0.0a0",
]  # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[tool.scikit-build]
diff --git a/python/rmm/CMakeLists.txt b/python/rmm/CMakeLists.txt
index a39ac8868..2ee07605b 100644
--- a/python/rmm/CMakeLists.txt
+++ b/python/rmm/CMakeLists.txt
@@ -1,5 +1,5 @@
# =============================================================================
-# Copyright (c) 2022-2024, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
@@ -29,10 +29,6 @@ rapids_cython_init()
# pass through logging level to spdlog
add_compile_definitions("RMM_LOG_ACTIVE_LEVEL=RMM_LOG_LEVEL_${RMM_LOGGING_LEVEL}")
-# Create an object library for the logger so that we don't have to recompile it.
-add_library(cpp_logger OBJECT)
-target_link_libraries(cpp_logger PRIVATE rmm::rmm_logger_impl)
-
add_subdirectory(rmm/_cuda)
add_subdirectory(rmm/librmm)
add_subdirectory(rmm/pylibrmm)
diff --git a/python/rmm/docs/conf.py b/python/rmm/docs/conf.py
index a21698ded..b81f309ce 100644
--- a/python/rmm/docs/conf.py
+++ b/python/rmm/docs/conf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
# Configuration file for the Sphinx documentation builder.
#
@@ -238,8 +238,8 @@ def on_missing_reference(app, env, node, contnode):
        "thrust",
        "spdlog",
        "stream_ref",
-        # logger names (we may eventually want to link out for those)
-        "sink_ptr",
+        # rapids_logger names
+        "rapids_logger",
        # libcu++ names
        "cuda",
        "cuda::mr",
diff --git a/python/rmm/pyproject.toml b/python/rmm/pyproject.toml
index d457b0913..2771ee027 100644
--- a/python/rmm/pyproject.toml
+++ b/python/rmm/pyproject.toml
@@ -31,6 +31,7 @@ license = { text = "Apache 2.0" }
requires-python = ">=3.10"
dependencies = [
    "cuda-python>=11.8.5,<12.0a0",
+    "librmm==25.4.*,>=0.0.0a0",
    "numpy>=1.23,<3.0a0",
]  # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
diff --git a/python/rmm/rmm/__init__.py b/python/rmm/rmm/__init__.py
index f1d6e2d7e..c813ec42b 100644
--- a/python/rmm/rmm/__init__.py
+++ b/python/rmm/rmm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018-2024, NVIDIA CORPORATION.
+# Copyright (c) 2018-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,15 @@
import warnings
+# This path is only taken for wheels where librmm is a separate Python package.
+try:
+    import librmm
+except ModuleNotFoundError:
+    pass
+else:
+    librmm.load_library()
+    del librmm
+
from rmm import mr
from rmm._version import __git_commit__, __version__
from rmm.mr import disable_logging, enable_logging, get_log_filenames
diff --git a/python/rmm/rmm/_cuda/CMakeLists.txt b/python/rmm/rmm/_cuda/CMakeLists.txt
index 1617ead7f..f45c07aea 100644
--- a/python/rmm/rmm/_cuda/CMakeLists.txt
+++ b/python/rmm/rmm/_cuda/CMakeLists.txt
@@ -12,7 +12,7 @@
# the License.
# =============================================================================
-set(linked_libraries rmm::rmm cpp_logger)
+set(linked_libraries rmm::rmm)
rapids_cython_create_modules(SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}"
                             CXX)
diff --git a/python/rmm/rmm/librmm/CMakeLists.txt b/python/rmm/rmm/librmm/CMakeLists.txt
index dc807fdba..006b24227 100644
--- a/python/rmm/rmm/librmm/CMakeLists.txt
+++ b/python/rmm/rmm/librmm/CMakeLists.txt
@@ -1,5 +1,5 @@
# =============================================================================
-# Copyright (c) 2022-2024, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
@@ -13,7 +13,7 @@
# =============================================================================
set(cython_sources _logger.pyx)
-set(linked_libraries rmm::rmm cpp_logger)
+set(linked_libraries rmm::rmm)
# Build all of the Cython targets
rapids_cython_create_modules(SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}"
diff --git a/python/rmm/rmm/librmm/_logger.pxd b/python/rmm/rmm/librmm/_logger.pxd
index bd0728bc1..b820b4e4e 100644
--- a/python/rmm/rmm/librmm/_logger.pxd
+++ b/python/rmm/rmm/librmm/_logger.pxd
@@ -1,4 +1,4 @@
-# Copyright (c) 2023-2024, NVIDIA CORPORATION.
+# Copyright (c) 2023-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
from libcpp cimport bool
from libcpp.string cimport string
-cdef extern from "rmm/logger.hpp" namespace "rmm" nogil:
+cdef extern from "rapids_logger/logger.hpp" namespace "rapids_logger" nogil:
    cpdef enum class level_enum:
        trace
        debug
@@ -36,4 +36,6 @@
        level_enum flush_level() except +
        bool should_log(level_enum msg_level) except +
+
+cdef extern from "rmm/logger.hpp" namespace "rmm" nogil:
    cdef logger& default_logger() except +
diff --git a/python/rmm/rmm/pylibrmm/CMakeLists.txt b/python/rmm/rmm/pylibrmm/CMakeLists.txt
index 1be58c32e..56399fd4e 100644
--- a/python/rmm/rmm/pylibrmm/CMakeLists.txt
+++ b/python/rmm/rmm/pylibrmm/CMakeLists.txt
@@ -14,7 +14,7 @@
set(cython_sources device_buffer.pyx logger.pyx memory_resource.pyx cuda_stream.pyx helper.pyx
                   stream.pyx)
-set(linked_libraries rmm::rmm cpp_logger)
+set(linked_libraries rmm::rmm)
# Build all of the Cython targets
rapids_cython_create_modules(SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}"
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 413f27f26..d026c56e2 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,5 +1,5 @@
# =============================================================================
-# Copyright (c) 2018-2024, NVIDIA CORPORATION.
+# Copyright (c) 2018-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
@@ -44,8 +44,6 @@ function(ConfigureTestInternal TEST_NAME)
                         PUBLIC "RMM_LOG_ACTIVE_LEVEL=RMM_LOG_LEVEL_${RMM_LOGGING_LEVEL}")
  target_compile_options(${TEST_NAME} PUBLIC $<$:-Wall -Werror>)
-  target_link_libraries(${TEST_NAME} PRIVATE rmm_test_logger)
-
  if(DISABLE_DEPRECATION_WARNING)
    target_compile_options(
      ${TEST_NAME} PUBLIC $<$:-Xcompiler=-Wno-deprecated-declarations>)
@@ -130,12 +128,6 @@ function(ConfigureTest TEST_NAME)
endfunction()
-# Create an object library for the logger so that we don't have to recompile it.
-add_library(rmm_test_logger OBJECT)
-target_link_libraries(rmm_test_logger PRIVATE rmm_logger_impl)
-
-include(../cmake/thirdparty/get_spdlog.cmake)
-
# test sources
# device mr_ref tests
@@ -168,8 +160,6 @@ ConfigureTest(STATISTICS_TEST mr/device/statistics_mr_tests.cpp)
# tracking adaptor tests
ConfigureTest(TRACKING_TEST mr/device/tracking_mr_tests.cpp)
-target_link_libraries(TRACKING_TEST PRIVATE spdlog::spdlog_header_only)
-target_link_libraries(TRACKING_PTDS_TEST PRIVATE spdlog::spdlog_header_only)
# out-of-memory callback adaptor tests
ConfigureTest(FAILURE_CALLBACK_TEST mr/device/failure_callback_mr_tests.cpp)
@@ -206,8 +196,6 @@ ConfigureTest(PREFETCH_TEST prefetch_tests.cpp)
# logger tests
ConfigureTest(LOGGER_TEST logger_tests.cpp)
-target_link_libraries(LOGGER_TEST PRIVATE spdlog::spdlog_header_only)
-target_link_libraries(LOGGER_PTDS_TEST PRIVATE spdlog::spdlog_header_only)
# arena MR tests
ConfigureTest(ARENA_MR_TEST mr/device/arena_mr_tests.cpp GPUS 1 PERCENT 100)
diff --git a/tests/logger_tests.cpp b/tests/logger_tests.cpp
index 619143294..6f4ab96cd 100644
--- a/tests/logger_tests.cpp
+++ b/tests/logger_tests.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -152,8 +152,8 @@ TEST(Adaptor, MultiSinkConstructor)
  std::string filename2{temp_dir.generate_path("test_multi_2.txt")};
  rmm::mr::cuda_memory_resource upstream;
-  auto file_sink1 = std::make_shared(filename1, true);
-  auto file_sink2 = std::make_shared(filename2, true);
+  auto file_sink1 = std::make_shared(filename1, true);
+  auto file_sink2 = std::make_shared(filename2, true);
  rmm::mr::logging_resource_adaptor log_mr{&upstream, {file_sink1, file_sink2}};
diff --git a/tests/mr/device/tracking_mr_tests.cpp b/tests/mr/device/tracking_mr_tests.cpp
index c40a9127d..f723e0c65 100644
--- a/tests/mr/device/tracking_mr_tests.cpp
+++ b/tests/mr/device/tracking_mr_tests.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -202,7 +202,7 @@ TEST(TrackingTest, DeallocWrongBytes)
TEST(TrackingTest, LogOutstandingAllocations)
{
  std::ostringstream oss;
-  auto oss_sink = std::make_shared(oss);
+  auto oss_sink = std::make_shared(oss);
  auto old_level = rmm::default_logger().level();
  rmm::default_logger().sinks().push_back(oss_sink);
@@ -212,7 +212,7 @@
    allocations.push_back(mr.allocate(ten_MiB));
  }
-  rmm::default_logger().set_level(rmm::level_enum::debug);
+  rmm::default_logger().set_level(rapids_logger::level_enum::debug);
  EXPECT_NO_THROW(mr.log_outstanding_allocations());
#if RMM_LOG_ACTIVE_LEVEL <= RMM_LOG_LEVEL_DEBUG