diff --git a/.clang-tidy b/.clang-tidy index 07b79a9504f..08ce8a230d4 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -7,6 +7,8 @@ Checks: | -clang-analyzer-optin.mpi.MPI-Checker, -clang-analyzer-security.FloatLoopCounter, bugprone-*, + -bugprone-easily-swappable-parameters, + -bugprone-implicit-widening-of-multiplication-result, clang-analyzer-alpha.*, modernize-deprecated-headers, modernize-make-shared, @@ -71,7 +73,7 @@ CheckOptions: - key: modernize-make-shared.IgnoreMacros value: '1' - key: modernize-make-shared.IncludeStyle - value: '0' + value: 'llvm' - key: modernize-make-shared.MakeSmartPtrFunction value: 'std::make_shared' - key: modernize-make-shared.MakeSmartPtrFunctionHeader @@ -79,7 +81,7 @@ CheckOptions: - key: modernize-make-unique.IgnoreMacros value: '1' - key: modernize-make-unique.IncludeStyle - value: '0' + value: 'llvm' - key: modernize-make-unique.MakeSmartPtrFunction value: 'std::make_unique' - key: modernize-make-unique.MakeSmartPtrFunctionHeader diff --git a/.cppcheck b/.cppcheck new file mode 100644 index 00000000000..23c339928ed --- /dev/null +++ b/.cppcheck @@ -0,0 +1,9 @@ +// clang-format off +constParameter +unusedFunction +missingIncludeSystem +noConstructor +noExplicitConstructor +redundantAssignment +uselessAssignmentPtrArg +preprocessorErrorDirective diff --git a/.github/actions/build_and_check/action.yml b/.github/actions/build_and_check/action.yml index e9ea1f140b5..2dcce230289 100644 --- a/.github/actions/build_and_check/action.yml +++ b/.github/actions/build_and_check/action.yml @@ -22,7 +22,7 @@ runs: pip3 install numpy cython h5py scipy shell: bash - run: | - export myconfig=maxset with_cuda=false test_timeout=600 with_asan=${{ inputs.asan }} with_ubsan=${{ inputs.ubsan }} check_skip_long=${{ inputs.check_skip_long }} + export myconfig=maxset with_cuda=false test_timeout=800 with_asan=${{ inputs.asan }} with_ubsan=${{ inputs.ubsan }} check_skip_long=${{ inputs.check_skip_long }} bash maintainer/CI/build_cmake.sh shell: bash # This is a workaround for the unfortunate interaction of MacOS and OpenMPI 4 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3a18e9ebdca..c8a54090ae1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -image: docker.pkg.github.com/espressomd/docker/ubuntu-20.04:254edd4a9c6e4d7b557be73158e400f5794e4f99 +image: ghcr.io/espressomd/docker/ubuntu-20.04:254edd4a9c6e4d7b557be73158e400f5794e4f99 stages: - prepare @@ -127,7 +127,7 @@ no_rotation: ubuntu:wo-dependencies: <<: *global_job_definition stage: build - image: docker.pkg.github.com/espressomd/docker/ubuntu-wo-dependencies:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d + image: ghcr.io/espressomd/docker/ubuntu-wo-dependencies:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d variables: myconfig: 'maxset' with_cuda: 'false' @@ -145,7 +145,7 @@ ubuntu:wo-dependencies: debian:10: <<: *global_job_definition stage: build - image: docker.pkg.github.com/espressomd/docker/debian:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d + image: ghcr.io/espressomd/docker/debian:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d variables: with_cuda: 'false' myconfig: 'maxset' @@ -160,7 +160,7 @@ debian:10: fedora:34: <<: *global_job_definition stage: build - image: docker.pkg.github.com/espressomd/docker/fedora:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d + image: ghcr.io/espressomd/docker/fedora:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d variables: with_cuda: 'false' myconfig: 'maxset' @@ -201,7 +201,7 @@ clang-sanitizer: fast_math: <<: *global_job_definition stage: build - image: 
docker.pkg.github.com/espressomd/docker/cuda:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d + image: ghcr.io/espressomd/docker/cuda:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d variables: CC: 'gcc-9' CXX: 'g++-9' @@ -222,7 +222,7 @@ fast_math: cuda11-maxset: <<: *global_job_definition stage: build - image: docker.pkg.github.com/espressomd/docker/cuda:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d + image: ghcr.io/espressomd/docker/cuda:9ef2166b82d4c0eb258d17f5ec29f7bc39991f5d variables: CC: 'gcc-9' CXX: 'g++-9' @@ -488,7 +488,7 @@ run_doxygen: - docker - linux -check_cuda_maxset_no_gpu: +maxset_no_gpu: <<: *global_job_definition stage: additional_checks when: on_success @@ -502,7 +502,7 @@ check_cuda_maxset_no_gpu: - docker - linux -check_with_odd_no_of_processors: +maxset_3_cores: <<: *global_job_definition stage: additional_checks when: on_success diff --git a/CMakeLists.txt b/CMakeLists.txt index 3635237e693..e9c9a0286b9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,6 +77,7 @@ option(WITH_STOKESIAN_DYNAMICS "Build with Stokesian Dynamics" OFF) option(WITH_BENCHMARKS "Enable benchmarks" OFF) option(WITH_VALGRIND_INSTRUMENTATION "Build with valgrind instrumentation markers" OFF) +option(WITH_CPPCHECK "Run Cppcheck during compilation" OFF) if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") option(WITH_CLANG_TIDY "Run Clang-Tidy during compilation" OFF) endif() @@ -97,6 +98,7 @@ endif() option(WARNINGS_ARE_ERRORS "Treat warnings as errors during compilation" OFF) option(WITH_CCACHE "Use ccache compiler invocation." OFF) option(WITH_PROFILER "Enable profiler annotations." OFF) +option(INSIDE_DOCKER "Enable when running inside Docker." OFF) set(TEST_TIMEOUT "300" CACHE STRING "Timeout in seconds for each testsuite test") @@ -126,18 +128,22 @@ include(MyConfig) include(CheckCXXSourceCompiles) -set(__PRETTYFUNC__ __func__) -foreach(func_name __PRETTY_FUNCTION__ __FUNCTION__) +# cross-platform macro to print the function name in error messages +set(PRETTY_FUNCTION_EXTENSION __func__) + +# search for a supported compiler extension that prints the function name as +# well as its list of arguments, return type and namespace +foreach(func_name __PRETTY_FUNCTION__ __FUNCSIG__ __FUNCTION__) check_cxx_source_compiles( " #include int main() { std::string(${func_name}); } " result${func_name}) if(result${func_name}) - set(__PRETTYFUNC__ ${func_name}) + set(PRETTY_FUNCTION_EXTENSION ${func_name}) break() endif(result${func_name}) -endforeach(func_name __PRETTY_FUNCTION__ __FUNCTION__) +endforeach() # # Libraries @@ -270,6 +276,32 @@ endif(WITH_VALGRIND_INSTRUMENTATION) # find_package(MPI 3.0 REQUIRED) +find_package(MpiexecBackend) + +# OpenMPI checks the number of processes against the number of CPUs +if("${MPIEXEC_BACKEND_NAME}" STREQUAL "OpenMPI" AND "${MPIEXEC_BACKEND_VERSION}" + VERSION_GREATER_EQUAL 2.0.0) + set(MPIEXEC_OVERSUBSCRIBE "-oversubscribe") +else() + set(MPIEXEC_OVERSUBSCRIBE "") +endif() + +# OpenMPI cannot run two jobs in parallel in a Docker container, because the +# same base folder is used to store the process ids of multiple jobs. Since the +# base folder is deleted upon completion of a job, other jobs will fail when +# attempting to create subdirectories in the base folder. 
+# https://github.com/open-mpi/ompi/issues/8510 +if("${MPIEXEC_BACKEND_NAME}" STREQUAL "OpenMPI" AND INSIDE_DOCKER) + cmake_host_system_information(RESULT hostname QUERY HOSTNAME) + function(set_mpiexec_tmpdir) + set(MPIEXEC_TMPDIR --mca orte_tmpdir_base + "/tmp/ompi.${hostname}.$ENV{USER}.${ARGV0}" PARENT_SCOPE) + endfunction() +else() + function(set_mpiexec_tmpdir) + set(MPIEXEC_TMPDIR "" PARENT_SCOPE) + endfunction() +endif() # # Boost @@ -322,9 +354,7 @@ target_compile_options( # disable warnings from -Wextra -Wno-sign-compare -Wno-unused-function - -Wno-unused-variable -Wno-unused-parameter - -Wno-missing-braces $<$:-Wno-clobbered> $<$:-wd592>) @@ -421,6 +451,19 @@ if(WITH_CLANG_TIDY) set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_EXE};--extra-arg=--cuda-host-only") endif() +if(WITH_CPPCHECK) + find_program(CMAKE_CXX_CPPCHECK NAMES cppcheck) + if(NOT CMAKE_CXX_CPPCHECK) + message(FATAL_ERROR "Could not find the program cppcheck.") + endif() + list(APPEND CMAKE_CXX_CPPCHECK "--enable=all" + "--std=c++${CMAKE_CXX_STANDARD}" "--quiet" "--inline-suppr" + "--suppressions-list=${CMAKE_CURRENT_SOURCE_DIR}/.cppcheck") + if(WARNINGS_ARE_ERRORS) + list(APPEND CMAKE_CXX_CPPCHECK "--error-exitcode=2") + endif() +endif() + # # Testing # diff --git a/cmake/FindMpiexecBackend.cmake b/cmake/FindMpiexecBackend.cmake new file mode 100644 index 00000000000..3ec12502150 --- /dev/null +++ b/cmake/FindMpiexecBackend.cmake @@ -0,0 +1,48 @@ +# +# Copyright (C) 2022 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# Find the MPI backend. 
+# +# This code sets the following variables: +# +# MPIEXEC_BACKEND_NAME MPIEXEC_BACKEND_VERSION + +set(MPIEXEC_BACKEND_NAME "unknown") +set(MPIEXEC_BACKEND_VERSION 0.0.0) + +execute_process( + COMMAND ${MPIEXEC} --version RESULT_VARIABLE mpi_version_result + OUTPUT_VARIABLE mpi_version_output ERROR_VARIABLE mpi_version_output) +if(mpi_version_result EQUAL 0) + if(mpi_version_output MATCHES "Intel\\(R\\) MPI Library") + set(MPIEXEC_BACKEND_NAME "Intel") + string(REGEX REPLACE ".*Build ([0-9]+).*" "\\1" MPIEXEC_BACKEND_VERSION ${mpi_version_output}) + endif() + if(mpi_version_output MATCHES "HYDRA") + set(MPIEXEC_BACKEND_NAME "MPICH") + string(REGEX REPLACE ".*Version: +([0-9\\.]+).*" "\\1" MPIEXEC_BACKEND_VERSION ${mpi_version_output}) + endif() + if(mpi_version_output MATCHES "\\(Open(RTE| MPI)\\)") + set(MPIEXEC_BACKEND_NAME "OpenMPI") + string(REGEX REPLACE ".*\\(Open(RTE| MPI)\\) ([0-9\\.]+).*" "\\2" MPIEXEC_BACKEND_VERSION ${mpi_version_output}) + endif() +endif() + +include( FindPackageHandleStandardArgs ) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(MpiexecBackend REQUIRED_VARS MPIEXEC) diff --git a/cmake/FindPythonHeaders.cmake b/cmake/FindPythonHeaders.cmake index 26b96955ffa..3e5f5700254 100644 --- a/cmake/FindPythonHeaders.cmake +++ b/cmake/FindPythonHeaders.cmake @@ -19,14 +19,14 @@ # find the Python C++ headers execute_process( COMMAND ${PYTHON_EXECUTABLE} -c - "import distutils.sysconfig as cg; print(cg.get_python_inc())" + "import sysconfig; print(sysconfig.get_path('include'))" OUTPUT_VARIABLE PYTHON_INCLUDE_DIRS OUTPUT_STRIP_TRAILING_WHITESPACE) # find Python installation directory if(NOT PYTHON_INSTDIR) execute_process( COMMAND ${PYTHON_EXECUTABLE} -c - "import distutils.sysconfig as cg; print(cg.get_python_lib(prefix='${CMAKE_INSTALL_PREFIX}', plat_specific=True, standard_lib=False).replace('${CMAKE_INSTALL_PREFIX}/', '', 1))" + "import sysconfig; print(sysconfig.get_path('purelib', vars={'base': ''}).lstrip('/'))" OUTPUT_VARIABLE PYTHON_INSTDIR OUTPUT_STRIP_TRAILING_WHITESPACE) endif(NOT PYTHON_INSTDIR) diff --git a/cmake/cmake_config.cmakein b/cmake/cmake_config.cmakein index 1fae2b1fd75..8a6bfc227ed 100644 --- a/cmake/cmake_config.cmakein +++ b/cmake/cmake_config.cmakein @@ -23,7 +23,9 @@ #define PACKAGE_NAME "${PROJECT_NAME}" -#define __PRETTYFUNC__ @__PRETTYFUNC__@ - - - +/** + * @brief Compiler-specific macro containing the demangled name + * of the function in the current scope. When the current compiler + * doesn't provide such an extension, defaults to @c __func__. 
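The FindPythonHeaders.cmake hunk above replaces the deprecated `distutils.sysconfig` queries with the standard-library `sysconfig` module. The sketch below shows the two lookups the CMake module now performs; the printed paths are examples and depend on the Python installation:

```python
import sysconfig

# header directory used for PYTHON_INCLUDE_DIRS,
# e.g. /usr/include/python3.9
print(sysconfig.get_path('include'))

# site-packages directory relative to the installation prefix, used for
# PYTHON_INSTDIR; an empty 'base' plus lstrip('/') yields a relative path,
# e.g. lib/python3.9/site-packages
print(sysconfig.get_path('purelib', vars={'base': ''}).lstrip('/'))
```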
+ */ +#define PRETTY_FUNCTION_EXTENSION @PRETTY_FUNCTION_EXTENSION@ diff --git a/cmake/unit_test.cmake b/cmake/unit_test.cmake index 014ad52c49c..225ef71be1c 100644 --- a/cmake/unit_test.cmake +++ b/cmake/unit_test.cmake @@ -1,17 +1,3 @@ -if(EXISTS ${MPIEXEC}) - # OpenMPI 2.0 and higher checks the number of processes against the number of - # CPUs - execute_process( - COMMAND ${MPIEXEC} --version RESULT_VARIABLE mpi_version_result - OUTPUT_VARIABLE mpi_version_output ERROR_VARIABLE mpi_version_output) - if(mpi_version_result EQUAL 0 AND mpi_version_output MATCHES - "\\(Open(RTE| MPI)\\) ([2-9]\\.|1[0-9])") - set(MPIEXEC_OVERSUBSCRIBE "-oversubscribe") - else() - set(MPIEXEC_OVERSUBSCRIBE "") - endif() -endif() - # unit_test function function(UNIT_TEST) cmake_parse_arguments(TEST "" "NAME;NUM_PROC" "SRC;DEPENDS" ${ARGN}) @@ -30,10 +16,11 @@ function(UNIT_TEST) if(${TEST_NUM_PROC} GREATER ${TEST_NP}) set(TEST_NUM_PROC ${TEST_NP}) endif() - + set_mpiexec_tmpdir("${TEST_NAME}") add_test(${TEST_NAME} ${MPIEXEC} ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_NUMPROC_FLAG} ${TEST_NUM_PROC} ${MPIEXEC_PREFLAGS} - ${CMAKE_CURRENT_BINARY_DIR}/${TEST_NAME} ${MPIEXEC_POSTFLAGS}) + ${MPIEXEC_TMPDIR} ${CMAKE_CURRENT_BINARY_DIR}/${TEST_NAME} + ${MPIEXEC_POSTFLAGS}) else() add_test(${TEST_NAME} ${TEST_NAME}) endif() diff --git a/doc/sphinx/advanced_methods.rst b/doc/sphinx/advanced_methods.rst index 1683388b345..2f1525fd5df 100644 --- a/doc/sphinx/advanced_methods.rst +++ b/doc/sphinx/advanced_methods.rst @@ -1707,7 +1707,7 @@ In the *forward* reaction, the appropriate number of reactants (given by :math:`\nu_i`) is removed from the system, and the concomitant number of products is inserted into the system. In the *backward* reaction, reactants and products exchange their roles. The acceptance probability -:math:`P^{\xi}` for move from state :math:`o` to :math:`n` reaction +:math:`P^{\xi}` for a move from state :math:`o` to :math:`n` in the reaction ensemble is given by the criterion :cite:`smith94c` .. math:: @@ -1718,10 +1718,11 @@ ensemble is given by the criterion :cite:`smith94c` where :math:`\Delta E=E_\mathrm{new}-E_\mathrm{old}` is the change in potential energy, :math:`V` is the simulation box volume, -and :math:`\beta=1/k_\mathrm{B}T`. -The extent of reaction, :math:`\xi=1` for the forward, and +:math:`\beta=1/k_\mathrm{B}T` is the Boltzmann factor, and +:math:`\xi` is the extent of reaction, with :math:`\xi=1` for the forward and :math:`\xi=-1` for the backward direction. -The parameter :math:`\Gamma` proportional to the reaction constant. It is defined as + +:math:`\Gamma` is proportional to the reaction constant. It is defined as .. math:: diff --git a/doc/sphinx/inter_bonded.rst b/doc/sphinx/inter_bonded.rst index 84e6458e32d..465fb40f046 100644 --- a/doc/sphinx/inter_bonded.rst +++ b/doc/sphinx/inter_bonded.rst @@ -231,13 +231,13 @@ A virtual bond can be instantiated via :class:`espressomd.interactions.Virtual`:: import espressomd.interactions - tab = espressomd.interactions.Virtual() + vb = espressomd.interactions.Virtual() This creates a virtual bond type identifier for a pair bond without associated potential or force. It can be used to specify topologies and for some analysis that rely on bonds, or for bonds that should be -displayed in the visualization. +displayed in the visualizer. diff --git a/doc/sphinx/io.rst b/doc/sphinx/io.rst index 1c07a34362f..684e854aa7f 100644 --- a/doc/sphinx/io.rst +++ b/doc/sphinx/io.rst @@ -216,14 +216,16 @@ capabilities. The usage is quite simple: .. 
code:: python - import espressomd.io.mppiio + import espressomd + import espressomd.io system = espressomd.System(box_l=[1, 1, 1]) # ... add particles here - espressomd.io.mppiio.mpiio.write("/tmp/mydata", positions=True, velocities=True, types=True, bonds=True) + mpiio = espressomd.io.mpiio.Mpiio() + mpiio.write("/tmp/mydata", positions=True, velocities=True, types=True, bonds=True) -Here, :file:`/tmp/mydata` is the prefix used for several files. The call will output -particle positions, velocities, types and their bonds to the following files in -folder :file:`/tmp`: +Here, :file:`/tmp/mydata` is the prefix used to generate several files. +The call will output particle positions, velocities, types and their bonds +to the following files in folder :file:`/tmp`: - :file:`mydata.head` - :file:`mydata.id` @@ -235,11 +237,25 @@ folder :file:`/tmp`: - :file:`mydata.bond` Depending on the chosen output, not all of these files might be created. -To read these in again, simply call :meth:`espressomd.io.mpiio.Mpiio.read`. It has the same signature as -:meth:`espressomd.io.mpiio.Mpiio.write`. - -*WARNING*: Do not attempt to read these binary files on a machine with a different -architecture! +To read these in again, simply call :meth:`espressomd.io.mpiio.Mpiio.read`. +It has the same signature as :meth:`espressomd.io.mpiio.Mpiio.write`. +When writing files, make sure the prefix hasn't been used before +(e.g. by a different simulation script), otherwise the write operation +will fail to avoid accidentally overwriting pre-existing data. Likewise, +reading incomplete data (or complete data but with the wrong number of MPI +ranks) will throw an error. + +*WARNING*: Do not attempt to read these binary files on a machine +with a different architecture! This will read malformed data without +necessarily throwing an error. + +In case of read failure or write failure, the simulation will halt. +On 1 MPI rank, the simulation will halt with a python runtime error. +This exception can be recovered from; in case of a write operation, +any written file must be deleted before attempting to write again +(since the prefix argument must be unique). On more than 1 MPI rank, +the simulation will halt with a call to ``MPI_Abort`` and will send +the ``SIGABRT`` signal. .. _Writing VTF files: diff --git a/doc/sphinx/system_setup.rst b/doc/sphinx/system_setup.rst index 84f9569606d..7a260284b25 100644 --- a/doc/sphinx/system_setup.rst +++ b/doc/sphinx/system_setup.rst @@ -120,21 +120,21 @@ Details about the cell system can be obtained by :meth:`espressomd.system.System * ``type`` The current type of the cell system. * ``verlet_reuse`` Average number of integration steps the Verlet list is re-used. -.. _Domain decomposition: +.. _Regular decomposition: -Domain decomposition -~~~~~~~~~~~~~~~~~~~~ +Regular decomposition +~~~~~~~~~~~~~~~~~~~~~ -Invoking :py:meth:`~espressomd.cellsystem.CellSystem.set_domain_decomposition` -selects the domain decomposition cell scheme, using Verlet lists +Invoking :py:meth:`~espressomd.cellsystem.CellSystem.set_regular_decomposition` +selects the regular decomposition cell scheme, using Verlet lists for the calculation of the interactions. If you specify ``use_verlet_lists=False``, only the -domain decomposition is used, but not the Verlet lists. :: +regular decomposition is used, but not the Verlet lists. 
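Circling back to the MPI-IO documentation earlier in this hunk, a minimal round-trip sketch with the new `Mpiio` class looks as follows; it assumes the prefix `/tmp/mydata` has not been used before and that the read runs on the same number of MPI ranks and the same architecture as the write:

```python
import espressomd
import espressomd.io

system = espressomd.System(box_l=[1, 1, 1])
# ... add particles here

mpiio = espressomd.io.mpiio.Mpiio()
# writes mydata.head, mydata.id, mydata.pos, ... into folder /tmp
mpiio.write("/tmp/mydata", positions=True, velocities=True, types=True, bonds=True)
# read() has the same signature as write()
mpiio.read("/tmp/mydata", positions=True, velocities=True, types=True, bonds=True)
```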
:: system = espressomd.System(box_l=[1, 1, 1]) - system.cell_system.set_domain_decomposition(use_verlet_lists=True) + system.cell_system.set_regular_decomposition(use_verlet_lists=True) -The domain decomposition cellsystem is the default system and suits most +The regular decomposition cellsystem is the default system and suits most applications with short ranged interactions. The particles are divided up spatially into small compartments, the cells, such that the cell size is larger than the maximal interaction range. In this case interactions diff --git a/doc/sphinx/under_the_hood.rst b/doc/sphinx/under_the_hood.rst index b8e2804397f..f841946de52 100644 --- a/doc/sphinx/under_the_hood.rst +++ b/doc/sphinx/under_the_hood.rst @@ -27,7 +27,7 @@ how they are distributed onto the processor nodes and how they are organized on each of them. Moreover a cell system also defines procedures to efficiently calculate the force, energy and pressure for the short ranged interactions, since these can be heavily optimized -depending on the cell system. For example, the domain decomposition +depending on the cell system. For example, the regular decomposition cellsystem allows an order N interactions evaluation. Technically, a cell is organized as a dynamically growing array, not as @@ -41,7 +41,7 @@ without direct knowledge of the currently used cell system. Only the force, energy and pressure loops are implemented separately for each cell model as explained above. -The domain decomposition or link cell algorithm is implemented such +The regular decomposition or link cell algorithm is implemented such that the cells equal the cells, i.e. each cell is a separate particle list. For an example let us assume that the simulation box has size :math:`20\times 20\times 20` and that we assign 2 processors to the @@ -108,7 +108,7 @@ memory organization of |es|, the particles are accessed in a virtually linear order. Because the force calculation goes through the cells in a linear fashion, all accesses to a single cell occur close in time, for the force calculation of the cell itself as well as for its neighbors. -Using the domain decomposition cell scheme, two cell layers have to be +Using the regular decomposition cell scheme, two cell layers have to be kept in the processor cache. For 10000 particles and a typical cell grid size of 20, these two cell layers consume roughly 200 KBytes, which nearly fits into the L2 cache. Therefore every cell has to be read from diff --git a/doc/tutorials/constant_pH/constant_pH.ipynb b/doc/tutorials/constant_pH/constant_pH.ipynb index 0c8724d48ed..0aa3e62cca6 100644 --- a/doc/tutorials/constant_pH/constant_pH.ipynb +++ b/doc/tutorials/constant_pH/constant_pH.ipynb @@ -503,7 +503,7 @@ "After the initial relaxation we set the electrostatic interactions between the particles if it has been enabled via the `USE_ELECTROSTATICS` flag. For electrostatics can use either the Debye-Hückel `DH` algorithm or the `P3M` algorithm. The `DH` algorithm is based on the Debye-Hückel approximation, the assumptions of which are not satisfied in our simulated system. However, it runs much faster than the `P3M` algorithm, and the approximate result closely resembles the correct one. Therefore, the `DH` algorithm should not be used in production simulations. By using the `DH` algorithm in this tutorial, we sacrifice the accuracy for speed.\n", "To obtain an accurate result, we can use the `P3M` algorithm using `accuracy` of $10^{-3}$ as an acceptable tradeoff between accuracy and performance. 
For production runs it might be necessary to use a lower value of `accuracy`, depending on the simulated system.\n", "\n", - "By default, ESPResSo uses the domain decomposition cell system to speed up the calculation of short-range interactions. However, for a system with small number of particles and without electrostatics, it runs faster with `n_square` cell system. See the [user guide](https://espressomd.github.io/doc/system_setup.html#n-squared) for additional details on the cell systems." + "By default, ESPResSo uses the regular decomposition cell system to speed up the calculation of short-range interactions. However, for a system with small number of particles and without electrostatics, it runs faster with `n_square` cell system. See the [user guide](https://espressomd.github.io/doc/system_setup.html#n-squared) for additional details on the cell systems." ] }, { @@ -581,9 +581,10 @@ "RE = espressomd.reaction_ensemble.ConstantpHEnsemble(\n", " kT=KT_REDUCED,\n", " exclusion_radius=exclusion_radius,\n", - " seed=77\n", + " seed=77,\n", + " constant_pH=2 # temporary value\n", ")\n", - "RE.set_non_interacting_type(len(TYPES)) # this parameter helps speed up the calculation in an interacting system\n", + "RE.set_non_interacting_type(type=len(TYPES)) # this parameter helps speed up the calculation in an interacting system\n", "```" ] }, @@ -682,7 +683,7 @@ "source": [ "```python\n", "def equilibrate_reaction(reaction_steps=1):\n", - " RE.reaction(reaction_steps)\n", + " RE.reaction(reaction_steps=reaction_steps)\n", "```" ] }, @@ -739,7 +740,7 @@ " if USE_WCA and np.random.random() < prob_integration:\n", " system.integrator.run(integration_steps)\n", " # we should do at least one reaction attempt per reactive particle\n", - " RE.reaction(reaction_steps) \n", + " RE.reaction(reaction_steps=reaction_steps) \n", " num_As[i] = system.number_of_particles(type=type_A)\n", "```" ] diff --git a/maintainer/CI/build_cmake.sh b/maintainer/CI/build_cmake.sh index 023c0f53a0c..91b44886c75 100755 --- a/maintainer/CI/build_cmake.sh +++ b/maintainer/CI/build_cmake.sh @@ -122,7 +122,7 @@ if [ "${with_fast_math}" = true ]; then fi cmake_params="-DCMAKE_BUILD_TYPE=${build_type} -DCMAKE_CXX_STANDARD=${with_cxx_standard} -DWARNINGS_ARE_ERRORS=ON ${cmake_params}" -cmake_params="${cmake_params} -DCMAKE_INSTALL_PREFIX=/tmp/espresso-unit-tests" +cmake_params="${cmake_params} -DCMAKE_INSTALL_PREFIX=/tmp/espresso-unit-tests -DINSIDE_DOCKER=ON" cmake_params="${cmake_params} -DCTEST_ARGS=-j${check_procs} -DTEST_TIMEOUT=${test_timeout}" if [ "${make_check_benchmarks}" = true ]; then diff --git a/maintainer/benchmarks/CMakeLists.txt b/maintainer/benchmarks/CMakeLists.txt index 25213e94aba..5a7f576b572 100644 --- a/maintainer/benchmarks/CMakeLists.txt +++ b/maintainer/benchmarks/CMakeLists.txt @@ -15,6 +15,12 @@ if(EXISTS ${MPIEXEC}) endif() endif() +function(SET_BENCHMARK_PROPERTIES) + set_tests_properties( + ${ARGV0} PROPERTIES RUN_SERIAL TRUE SKIP_REGULAR_EXPRESSION + "espressomd.FeaturesError: Missing features") +endfunction() + function(PYTHON_BENCHMARK) cmake_parse_arguments( BENCHMARK "" "FILE;RUN_WITH_MPI;MIN_NUM_PROC;MAX_NUM_PROC" @@ -63,14 +69,14 @@ function(PYTHON_BENCHMARK) ${MPIEXEC} ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_NUMPROC_FLAG} ${nproc} ${MPIEXEC_PREFLAGS} ${CMAKE_BINARY_DIR}/pypresso ${BENCHMARK_FILE} ${BENCHMARK_ARGUMENTS} ${MPIEXEC_POSTFLAGS}) - set_tests_properties(${BENCHMARK_TEST_NAME} PROPERTIES RUN_SERIAL TRUE) + set_benchmark_properties(${BENCHMARK_TEST_NAME}) endforeach(nproc) else() 
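To summarize the constant-pH API changes in the tutorial hunk above: the pH value is now a constructor argument and the Monte Carlo calls take keyword arguments. A minimal sketch with placeholder types, gamma and box size (not a complete simulation script):

```python
import espressomd
import espressomd.reaction_ensemble

TYPES = {"HA": 0, "A": 1, "B": 2}  # placeholder particle types

system = espressomd.System(box_l=[10., 10., 10.])
# ... add particles of the reactant/product types here

RE = espressomd.reaction_ensemble.ConstantpHEnsemble(
    kT=1., exclusion_radius=1., seed=77,
    constant_pH=2)  # constant_pH is now passed at construction time
RE.add_reaction(gamma=1e-4,  # placeholder dissociation constant
                reactant_types=[TYPES["HA"]],
                product_types=[TYPES["A"], TYPES["B"]],
                default_charges={TYPES["HA"]: 0, TYPES["A"]: -1, TYPES["B"]: +1})
# hide reacting particles in a non-interacting type to speed up sampling
RE.set_non_interacting_type(type=len(TYPES))
# the number of Monte Carlo attempts is now a keyword argument
RE.reaction(reaction_steps=100)
```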
set(BENCHMARK_TEST_NAME benchmark__${BENCHMARK_NAME}__serial) add_test(NAME ${BENCHMARK_TEST_NAME} COMMAND ${CMAKE_BINARY_DIR}/pypresso ${BENCHMARK_FILE} ${BENCHMARK_ARGUMENTS}) - set_tests_properties(${BENCHMARK_TEST_NAME} PROPERTIES RUN_SERIAL TRUE) + set_benchmark_properties(${BENCHMARK_TEST_NAME}) endif() endfunction(PYTHON_BENCHMARK) diff --git a/maintainer/benchmarks/mc_acid_base_reservoir.py b/maintainer/benchmarks/mc_acid_base_reservoir.py index 7a987000c63..7798fc33796 100644 --- a/maintainer/benchmarks/mc_acid_base_reservoir.py +++ b/maintainer/benchmarks/mc_acid_base_reservoir.py @@ -234,7 +234,7 @@ def calc_donnan_coefficient(c_acid, I_res, charge=-1): seed=77 ) # this parameter helps speed up the calculation in an interacting system -RE.set_non_interacting_type(max(TYPES.values()) + 1) +RE.set_non_interacting_type(type=max(TYPES.values()) + 1) RE.add_reaction( gamma=K_NaCl_reduced, @@ -259,7 +259,7 @@ def calc_donnan_coefficient(c_acid, I_res, charge=-1): def equilibrate_reaction(reaction_steps=1): - RE.reaction(reaction_steps) + RE.reaction(reaction_steps=reaction_steps) def report_progress(system, i, next_i): @@ -295,7 +295,7 @@ def report_progress(system, i, next_i): if MC_STEPS_PER_SAMPLE > 0: tick_MC = time.time() - RE.reaction(MC_STEPS_PER_SAMPLE) + RE.reaction(reaction_steps=MC_STEPS_PER_SAMPLE) tock_MC = time.time() t_MC = (tock_MC - tick_MC) / MC_STEPS_PER_SAMPLE @@ -332,7 +332,7 @@ def report_progress(system, i, next_i): for i in range(NUM_SAMPLES): if RUN_INTEGRATION: system.integrator.run(INTEGRATION_STEPS_PER_SAMPLE) - RE.reaction(MC_STEPS_PER_SAMPLE) + RE.reaction(reaction_steps=MC_STEPS_PER_SAMPLE) n_A = system.number_of_particles(type=TYPES['A']) n_As.append(n_A) n_All = len(system.part) diff --git a/maintainer/benchmarks/p3m.py b/maintainer/benchmarks/p3m.py index 78a825c50fe..262cc675dae 100644 --- a/maintainer/benchmarks/p3m.py +++ b/maintainer/benchmarks/p3m.py @@ -87,7 +87,7 @@ # System ############################################################# system.box_l = 3 * (box_l,) -system.cell_system.set_domain_decomposition(use_verlet_lists=True) +system.cell_system.set_regular_decomposition(use_verlet_lists=True) # Integration parameters ############################################################# diff --git a/maintainer/benchmarks/runner.sh b/maintainer/benchmarks/runner.sh index 0f8130672ed..1153936a7a7 100644 --- a/maintainer/benchmarks/runner.sh +++ b/maintainer/benchmarks/runner.sh @@ -36,7 +36,7 @@ for config in ${configs}; do # add minimal features for the benchmarks to run sed -i '1 i\#define ELECTROSTATICS\n#define LENNARD_JONES\n#define MASS\n' "${config}" # remove checks - sed -ri "s/#define\s+ADDITIONAL_CHECKS//" "${config}" + sed -ri "/#define\s+ADDITIONAL_CHECKS/d" "${config}" done cat > benchmarks.csv << EOF diff --git a/maintainer/benchmarks/suite.sh b/maintainer/benchmarks/suite.sh index 4e49fb14df6..2c6a223a23b 100644 --- a/maintainer/benchmarks/suite.sh +++ b/maintainer/benchmarks/suite.sh @@ -51,7 +51,7 @@ cd "${build_dir}" # check for unstaged changes if [ -n "$(git status --porcelain -- ${directories})" ]; then echo "fatal: you have unstaged changes, please commit or stash them:" - git diff-index --name-only HEAD -- ${directories} + git status --porcelain -- ${directories} exit 1 fi diff --git a/samples/chamber_game.py b/samples/chamber_game.py index 3a192fac16b..63621e72ad2 100644 --- a/samples/chamber_game.py +++ b/samples/chamber_game.py @@ -156,7 +156,7 @@ # CELLSYSTEM system.cell_system.skin = 3.0 
-system.cell_system.set_domain_decomposition(use_verlet_lists=False) +system.cell_system.set_regular_decomposition(use_verlet_lists=False) # BONDS diff --git a/samples/grand_canonical.py b/samples/grand_canonical.py index 79549132bbd..c0b0a77f82b 100644 --- a/samples/grand_canonical.py +++ b/samples/grand_canonical.py @@ -99,9 +99,9 @@ # Set the hidden particle type to the lowest possible number to speed # up the simulation -RE.set_non_interacting_type(max(types) + 1) +RE.set_non_interacting_type(type=max(types) + 1) -RE.reaction(10000) +RE.reaction(reaction_steps=10000) p3m = espressomd.electrostatics.P3M(prefactor=2.0, accuracy=1e-3) system.actors.add(p3m) @@ -134,14 +134,14 @@ system.thermostat.set_langevin(kT=temperature, gamma=.5, seed=42) # MC warmup -RE.reaction(1000) +RE.reaction(reaction_steps=1000) n_int_cycles = 10000 n_int_steps = 600 num_As = [] deviation = None for i in range(n_int_cycles): - RE.reaction(10) + RE.reaction(reaction_steps=10) system.integrator.run(steps=n_int_steps) num_As.append(system.number_of_particles(type=1)) if i > 2 and i % 50 == 0: diff --git a/samples/reaction_ensemble.py b/samples/reaction_ensemble.py index edaa86027b8..1f1c0e91a3a 100644 --- a/samples/reaction_ensemble.py +++ b/samples/reaction_ensemble.py @@ -90,8 +90,7 @@ default_charges=charge_dict) elif args.mode == "constant_pH_ensemble": RE = espressomd.reaction_ensemble.ConstantpHEnsemble( - kT=1, exclusion_radius=1, seed=77) - RE.constant_pH = 2 + kT=1, exclusion_radius=1, seed=77, constant_pH=2) RE.add_reaction(gamma=K_diss, reactant_types=[types["HA"]], product_types=[types["A-"], types["H+"]], default_charges=charge_dict) @@ -105,14 +104,20 @@ # Set the hidden particle type to the lowest possible number to speed # up the simulation -RE.set_non_interacting_type(max(types.values()) + 1) +RE.set_non_interacting_type(type=max(types.values()) + 1) for i in range(10000): - RE.reaction() + RE.reaction(reaction_steps=1) if i % 100 == 0: print("HA", system.number_of_particles(type=types["HA"]), "A-", system.number_of_particles(type=types["A-"]), "H+", system.number_of_particles(type=types["H+"])) -print("reaction 0 has acceptance rate: ", RE.get_acceptance_rate_reaction(0)) -print("reaction 1 has acceptance rate: ", RE.get_acceptance_rate_reaction(1)) +print( + "reaction 0 has acceptance rate: ", + RE.get_acceptance_rate_reaction( + reaction_id=0)) +print( + "reaction 1 has acceptance rate: ", + RE.get_acceptance_rate_reaction( + reaction_id=1)) diff --git a/samples/reaction_ensemble_complex_reaction.py b/samples/reaction_ensemble_complex_reaction.py index 4fc3b3a16cd..33de01ca339 100644 --- a/samples/reaction_ensemble_complex_reaction.py +++ b/samples/reaction_ensemble_complex_reaction.py @@ -97,13 +97,13 @@ # Set the hidden particle type to the lowest possible number to speed # up the simulation -RE.set_non_interacting_type(max(types) + 1) +RE.set_non_interacting_type(type=max(types) + 1) # warmup -RE.reaction(200) +RE.reaction(reaction_steps=200) for i in range(200): - RE.reaction(10) + RE.reaction(reaction_steps=10) for _type in types: numbers[_type].append(system.number_of_particles(type=_type)) diff --git a/samples/visualization_cellsystem.py b/samples/visualization_cellsystem.py index 2eb015ddf44..c675088d88f 100644 --- a/samples/visualization_cellsystem.py +++ b/samples/visualization_cellsystem.py @@ -42,7 +42,7 @@ draw_cells=True) system.time_step = 0.0005 -system.cell_system.set_domain_decomposition(use_verlet_lists=True) 
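The sample updates in this region all follow the same pattern as the pair of lines surrounding this note: `set_domain_decomposition()` becomes `set_regular_decomposition()` with otherwise unchanged arguments. In isolation the renamed call reads as follows (skin value taken from the visualization sample below):

```python
import espressomd

system = espressomd.System(box_l=[10., 10., 10.])
# regular (formerly "domain") decomposition with Verlet lists, the default cell system
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0.4
```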
+system.cell_system.set_regular_decomposition(use_verlet_lists=True) system.cell_system.skin = 0.4 #system.cell_system.node_grid = [i, j, k] diff --git a/samples/visualization_charged.py b/samples/visualization_charged.py index a3786933a1f..6375c2aac6d 100644 --- a/samples/visualization_charged.py +++ b/samples/visualization_charged.py @@ -29,7 +29,7 @@ box = [40, 40, 40] system = espressomd.System(box_l=box) -system.cell_system.set_domain_decomposition(use_verlet_lists=True) +system.cell_system.set_regular_decomposition(use_verlet_lists=True) visualizer = espressomd.visualization_opengl.openGLLive( system, background_color=[1, 1, 1], drag_enabled=True, drag_force=10) diff --git a/samples/widom_insertion.py b/samples/widom_insertion.py index 356361d662d..e2dcc7c129b 100644 --- a/samples/widom_insertion.py +++ b/samples/widom_insertion.py @@ -116,7 +116,7 @@ # Set the hidden particle type to the lowest possible number to speed # up the simulation -widom.set_non_interacting_type(max(types) + 1) +widom.set_non_interacting_type(type=max(types) + 1) particle_insertion_potential_energy_samples = [] @@ -126,7 +126,7 @@ for i in range(n_iterations): for _ in range(n_samples_per_iteration): particle_insertion_potential_energy_samples.append( - widom.calculate_particle_insertion_potential_energy(0)) + widom.calculate_particle_insertion_potential_energy(reaction_id=insertion_reaction_id)) system.integrator.run(steps=500) if i % 20 == 0: diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 4702a0b2baf..c6fd3ecc3d5 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -29,6 +29,7 @@ set(EspressoCore_SRC rotate_system.cpp rotation.cpp Observable_stat.cpp + RegularDecomposition.cpp RuntimeErrorCollector.cpp RuntimeError.cpp RuntimeErrorStream.cpp @@ -42,8 +43,7 @@ set(EspressoCore_SRC CellStructure.cpp PartCfg.cpp AtomDecomposition.cpp - EspressoSystemStandAlone.cpp - DomainDecomposition.cpp) + EspressoSystemStandAlone.cpp) if(CUDA) set(EspressoCuda_SRC diff --git a/src/core/CellStructure.cpp b/src/core/CellStructure.cpp index a1a5921ea39..99a813db0fb 100644 --- a/src/core/CellStructure.cpp +++ b/src/core/CellStructure.cpp @@ -22,7 +22,8 @@ #include "CellStructure.hpp" #include "AtomDecomposition.hpp" -#include "DomainDecomposition.hpp" +#include "CellStructureType.hpp" +#include "RegularDecomposition.hpp" #include @@ -241,15 +242,18 @@ void CellStructure::resort_particles(int global_flag) { } void CellStructure::set_atom_decomposition(boost::mpi::communicator const &comm, - BoxGeometry const &box) { + BoxGeometry const &box, + LocalBox &local_geo) { set_particle_decomposition(std::make_unique(comm, box)); - m_type = CELL_STRUCTURE_NSQUARE; + m_type = CellStructureType::CELL_STRUCTURE_NSQUARE; + local_geo.set_cell_structure_type(m_type); } -void CellStructure::set_domain_decomposition( +void CellStructure::set_regular_decomposition( boost::mpi::communicator const &comm, double range, BoxGeometry const &box, - LocalBox const &local_geo) { + LocalBox &local_geo) { set_particle_decomposition( - std::make_unique(comm, range, box, local_geo)); - m_type = CELL_STRUCTURE_DOMDEC; + std::make_unique(comm, range, box, local_geo)); + m_type = CellStructureType::CELL_STRUCTURE_REGULAR; + local_geo.set_cell_structure_type(m_type); } diff --git a/src/core/CellStructure.hpp b/src/core/CellStructure.hpp index d792ae99a55..1f3ee14cc33 100644 --- a/src/core/CellStructure.hpp +++ b/src/core/CellStructure.hpp @@ -25,6 +25,7 @@ #include "AtomDecomposition.hpp" #include "BoxGeometry.hpp" 
#include "Cell.hpp" +#include "CellStructureType.hpp" #include "LocalBox.hpp" #include "Particle.hpp" #include "ParticleDecomposition.hpp" @@ -48,14 +49,6 @@ #include #include -/** Cell Structure */ -enum CellStructureType : int { - /** cell structure domain decomposition */ - CELL_STRUCTURE_DOMDEC = 1, - /** cell structure n square */ - CELL_STRUCTURE_NSQUARE = 2 -}; - namespace Cells { enum Resort : unsigned { RESORT_NONE = 0u, @@ -132,7 +125,7 @@ struct CellStructure { std::unique_ptr m_decomposition = std::make_unique(); /** Active type in m_decomposition */ - int m_type = CELL_STRUCTURE_NSQUARE; + CellStructureType m_type = CellStructureType::CELL_STRUCTURE_NSQUARE; /** One of @ref Cells::Resort, announces the level of resort needed. */ unsigned m_resort_particles = Cells::RESORT_NONE; @@ -153,6 +146,7 @@ struct CellStructure { */ void update_particle_index(int id, Particle *p) { assert(id >= 0); + // cppcheck-suppress assertWithSideEffect assert(not p or id == p->identity()); if (id >= m_particle_index.size()) @@ -246,7 +240,7 @@ struct CellStructure { } public: - int decomposition_type() const { return m_type; } + CellStructureType decomposition_type() const { return m_type; } /** Maximal cutoff supported by current cell system. */ Utils::Vector3d max_cutoff() const; @@ -496,22 +490,24 @@ struct CellStructure { * @brief Set the particle decomposition to AtomDecomposition. * * @param comm Communicator to use. - * @param box Box Geometry + * @param box Box Geometry. + * @param local_geo Geometry of the local box (holds cell structure type). */ void set_atom_decomposition(boost::mpi::communicator const &comm, - BoxGeometry const &box); + BoxGeometry const &box, + LocalBox &local_geo); /** - * @brief Set the particle decomposition to DomainDecomposition. + * @brief Set the particle decomposition to RegularDecomposition. * * @param comm Cartesian communicator to use. * @param range Interaction range. - * @param box Box Geometry + * @param box Box Geometry. * @param local_geo Geometry of the local box. */ - void set_domain_decomposition(boost::mpi::communicator const &comm, - double range, BoxGeometry const &box, - LocalBox const &local_geo); + void set_regular_decomposition(boost::mpi::communicator const &comm, + double range, BoxGeometry const &box, + LocalBox &local_geo); public: template void bond_loop(BondKernel const &bond_kernel) { diff --git a/src/core/CellStructureType.hpp b/src/core/CellStructureType.hpp new file mode 100644 index 00000000000..b3d7ffa56e5 --- /dev/null +++ b/src/core/CellStructureType.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2010-2020 The ESPResSo project + * Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010 + * Max-Planck-Institute for Polymer Research, Theory Group + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef ESPRESSO_CELLSTRUCTURETYPE_HPP +#define ESPRESSO_CELLSTRUCTURETYPE_HPP + +/** Cell structure */ +enum class CellStructureType : int { + /** cell structure regular decomposition */ + CELL_STRUCTURE_REGULAR = 1, + /** cell structure n square */ + CELL_STRUCTURE_NSQUARE = 2 +}; + +#endif // ESPRESSO_CELLSTRUCTURETYPE_HPP diff --git a/src/core/EspressoSystemInterface.hpp b/src/core/EspressoSystemInterface.hpp index ef5b27e973d..9a146495a66 100644 --- a/src/core/EspressoSystemInterface.hpp +++ b/src/core/EspressoSystemInterface.hpp @@ -47,74 +47,72 @@ class EspressoSystemInterface : public SystemInterface { m_instance = new EspressoSystemInterface; return *m_instance; - }; + } void init() override; void update() override; #ifdef CUDA - float *rGpuBegin() override { return m_r_gpu_begin; }; - bool hasRGpu() override { return true; }; + float *rGpuBegin() override { return m_r_gpu_begin; } + bool hasRGpu() override { return true; } void requestRGpu() override { m_needsRGpu = hasRGpu(); m_splitParticleStructGpu |= m_needsRGpu; m_gpu |= m_needsRGpu; enableParticleCommunication(); - }; + } #ifdef DIPOLES - float *dipGpuBegin() override { return m_dip_gpu_begin; }; - bool hasDipGpu() override { return true; }; + float *dipGpuBegin() override { return m_dip_gpu_begin; } + bool hasDipGpu() override { return true; } void requestDipGpu() override { m_needsDipGpu = hasDipGpu(); m_splitParticleStructGpu |= m_needsRGpu; m_gpu |= m_needsRGpu; enableParticleCommunication(); - }; + } #endif #ifdef ELECTROSTATICS - float *qGpuBegin() override { return m_q_gpu_begin; }; - bool hasQGpu() override { return true; }; + float *qGpuBegin() override { return m_q_gpu_begin; } + bool hasQGpu() override { return true; } void requestQGpu() override { m_needsQGpu = hasQGpu(); m_splitParticleStructGpu |= m_needsQGpu; m_gpu |= m_needsQGpu; enableParticleCommunication(); - }; + } #endif void requestParticleStructGpu() { m_needsParticleStructGpu = true; m_gpu |= m_needsParticleStructGpu; enableParticleCommunication(); - }; + } - float *fGpuBegin() override { return gpu_get_particle_force_pointer(); }; - bool hasFGpu() override { return true; }; + float *fGpuBegin() override { return gpu_get_particle_force_pointer(); } + bool hasFGpu() override { return true; } void requestFGpu() override { m_needsFGpu = hasFGpu(); m_gpu |= m_needsFGpu; enableParticleCommunication(); - }; + } #ifdef ROTATION - float *torqueGpuBegin() override { - return gpu_get_particle_torque_pointer(); - }; - bool hasTorqueGpu() override { return true; }; + float *torqueGpuBegin() override { return gpu_get_particle_torque_pointer(); } + bool hasTorqueGpu() override { return true; } void requestTorqueGpu() override { m_needsTorqueGpu = hasTorqueGpu(); m_gpu |= m_needsTorqueGpu; enableParticleCommunication(); - }; + } #endif float *eGpu() override { // cast pointer from struct of floats to array of floats // https://stackoverflow.com/a/29278260 return reinterpret_cast(gpu_get_energy_pointer()); - }; + } #endif // ifdef CUDA @@ -126,7 +124,7 @@ class EspressoSystemInterface : public SystemInterface { #else return 0; #endif - }; + } protected: static EspressoSystemInterface *m_instance; diff --git a/src/core/LocalBox.hpp b/src/core/LocalBox.hpp index d9fdb774daa..bd36f0aec08 100644 --- a/src/core/LocalBox.hpp +++ b/src/core/LocalBox.hpp @@ -19,6 +19,7 @@ #ifndef ESPRESSO_LOCALBOX_HPP #define ESPRESSO_LOCALBOX_HPP +#include "CellStructureType.hpp" #include template class LocalBox { @@ -26,15 +27,17 @@ template class LocalBox { Utils::Vector 
m_lower_corner = {0, 0, 0}; Utils::Vector m_upper_corner = {1, 1, 1}; Utils::Array m_boundaries = {}; + CellStructureType m_cell_structure_type; public: LocalBox() = default; LocalBox(Utils::Vector const &lower_corner, Utils::Vector const &local_box_length, - Utils::Array const &boundaries) + Utils::Array const &boundaries, + CellStructureType const cell_structure_type) : m_local_box_l(local_box_length), m_lower_corner(lower_corner), m_upper_corner(lower_corner + local_box_length), - m_boundaries(boundaries) {} + m_boundaries(boundaries), m_cell_structure_type(cell_structure_type) {} /** Left (bottom, front) corner of this nodes local box. */ Utils::Vector const &my_left() const { return m_lower_corner; } @@ -52,6 +55,16 @@ template class LocalBox { * @return Array with boundary information. */ Utils::Array const &boundary() const { return m_boundaries; } + + /** Return cell structure type. */ + CellStructureType const &cell_structure_type() const { + return m_cell_structure_type; + } + + /** Set cell structure type. */ + void set_cell_structure_type(CellStructureType cell_structure_type) { + m_cell_structure_type = cell_structure_type; + } }; #endif diff --git a/src/core/MpiCallbacks.hpp b/src/core/MpiCallbacks.hpp index 6c28cf079f1..21d7fd03269 100644 --- a/src/core/MpiCallbacks.hpp +++ b/src/core/MpiCallbacks.hpp @@ -186,7 +186,7 @@ struct callback_ignore_t final : public callback_concept_t { template explicit callback_ignore_t(FRef &&f) : m_f(std::forward(f)) {} - void operator()(boost::mpi::communicator const &comm, + void operator()(boost::mpi::communicator const &, boost::mpi::packed_iarchive &ia) const override { detail::invoke(m_f, ia); } @@ -239,7 +239,7 @@ struct callback_main_rank_t final : public callback_concept_t { template explicit callback_main_rank_t(FRef &&f) : m_f(std::forward(f)) {} - void operator()(boost::mpi::communicator const &comm, + void operator()(boost::mpi::communicator const &, boost::mpi::packed_iarchive &ia) const override { (void)detail::invoke(m_f, ia); } @@ -411,9 +411,9 @@ class MpiCallbacks { static auto &static_callbacks() { static std::vector< std::pair>> - m_callbacks; + callbacks; - return m_callbacks; + return callbacks; } public: diff --git a/src/core/Observable_stat.hpp b/src/core/Observable_stat.hpp index 72109bcf892..125c76ce983 100644 --- a/src/core/Observable_stat.hpp +++ b/src/core/Observable_stat.hpp @@ -56,18 +56,16 @@ class Observable_stat { if (m_chunk_size == 1) return boost::accumulate(m_data, acc); - for (auto it = m_data.begin() + column; it < m_data.end(); - it += m_chunk_size) + for (auto it = m_data.begin() + static_cast(column); + it < m_data.end(); it += static_cast(m_chunk_size)) acc += *it; return acc; } /** Rescale values */ void rescale(double volume) { - auto const factor = 1. / volume; - for (auto &e : m_data) { - e *= factor; - } + auto const fac = 1. / volume; + boost::transform(m_data, m_data.begin(), [fac](auto e) { return e * fac; }); } /** Contribution from linear and angular kinetic energy (accumulated). 
*/ @@ -90,7 +88,7 @@ class Observable_stat { /** Get contribution from a bonded interaction */ Utils::Span bonded_contribution(int bond_id) const { auto const offset = m_chunk_size * static_cast(bond_id); - return Utils::Span(bonded.data() + offset, m_chunk_size); + return {bonded.data() + offset, m_chunk_size}; } void add_non_bonded_contribution(int type1, int type2, diff --git a/src/core/PartCfg.cpp b/src/core/PartCfg.cpp index aa890870454..a2437195526 100644 --- a/src/core/PartCfg.cpp +++ b/src/core/PartCfg.cpp @@ -50,8 +50,8 @@ void PartCfg::update() { m_parts.push_back(get_particle_data(id)); auto &p = m_parts.back(); - p.r.p += image_shift(p.l.i, box_geo.length()); - p.l.i = {}; + p.pos() += image_shift(p.image_box(), box_geo.length()); + p.image_box() = {}; } offset += this_size; diff --git a/src/core/Particle.hpp b/src/core/Particle.hpp index 004715e240a..8243f3610c3 100644 --- a/src/core/Particle.hpp +++ b/src/core/Particle.hpp @@ -29,10 +29,21 @@ #include +#include #include #include #include +namespace detail { +inline void check_axis_idx_valid(int const axis) { + assert(axis >= 0 and axis <= 2); +} + +inline bool get_nth_bit(uint8_t const bitfield, int const bit_idx) { + return bitfield & (1u << bit_idx); +} +} // namespace detail + enum : uint8_t { ROTATION_FIXED = 0u, ROTATION_X = 1u, @@ -57,9 +68,9 @@ enum : uint8_t { struct ParticleParametersSwimming { /** Is the particle a swimmer. */ bool swimming = false; - /** Constant velocity to relax to. */ - double f_swim = 0.; /** Imposed constant force. */ + double f_swim = 0.; + /** Constant velocity to relax to. */ double v_swim = 0.; /** Flag for the swimming mode in a LB fluid. * Values: @@ -421,11 +432,6 @@ struct Particle { // NOLINT(bugprone-exception-escape) private: BondList bl; -public: - auto &bonds() { return bl; } - auto const &bonds() const { return bl; } - -private: #ifdef EXCLUSIONS /** list of particles, with which this particle has no non-bonded * interactions @@ -434,21 +440,151 @@ struct Particle { // NOLINT(bugprone-exception-escape) #endif public: - std::vector &exclusions() { -#ifdef EXCLUSIONS - return el; + auto const &id() const { return p.identity; } + auto &id() { return p.identity; } + auto const &mol_id() const { return p.mol_id; } + auto &mol_id() { return p.mol_id; } + auto const &type() const { return p.type; } + auto &type() { return p.type; } + + auto const &bonds() const { return bl; } + auto &bonds() { return bl; } + + auto const &pos() const { return r.p; } + auto &pos() { return r.p; } + auto const &v() const { return m.v; } + auto &v() { return m.v; } + auto const &force() const { return f.f; } + auto &force() { return f.f; } + + bool is_ghost() const { return l.ghost; } + void set_ghost(bool const ghost_flag) { l.ghost = ghost_flag; } + auto &pos_at_last_verlet_update() { return l.p_old; } + auto const &pos_at_last_verlet_update() const { return l.p_old; } + auto const &image_box() const { return l.i; } + auto &image_box() { return l.i; } + +#ifdef MASS + auto const &mass() const { return p.mass; } + auto &mass() { return p.mass; } #else - throw std::runtime_error{"Exclusions not enabled."}; + constexpr auto &mass() const { return p.mass; } #endif +#ifdef ROTATION + bool can_rotate() const { + return can_rotate_around(0) or can_rotate_around(1) or can_rotate_around(2); } - - std::vector const &exclusions() const { -#ifdef EXCLUSIONS - return el; + bool can_rotate_around(int const axis) const { + detail::check_axis_idx_valid(axis); + return detail::get_nth_bit(p.rotation, axis); + } + void 
set_can_rotate_around(int const axis, bool const rot_flag) { + detail::check_axis_idx_valid(axis); + if (rot_flag) { + p.rotation |= static_cast(1u << axis); + } else { + p.rotation &= static_cast(~(1u << axis)); + } + } + void set_can_rotate_all_axes() { + for (int axis = 0; axis <= 2; axis++) { + set_can_rotate_around(axis, true); + } + } + auto const &quat() const { return r.quat; } + auto &quat() { return r.quat; } + auto const &torque() const { return f.torque; } + auto &torque() { return f.torque; } + auto const &omega() const { return m.omega; } + auto &omega() { return m.omega; } + auto const &ext_torque() const { return p.ext_torque; } + auto &ext_torque() { return p.ext_torque; } + auto calc_director() const { return r.calc_director(); } +#else + bool can_rotate() const { return false; } + bool can_rotate_around(int const axis) const { return false; } +#endif +#ifdef DIPOLES + auto const &dipm() const { return p.dipm; } + auto &dipm() { return p.dipm; } +#endif +#ifdef ROTATIONAL_INERTIA + auto const &rinertia() const { return p.rinertia; } + auto &rinertia() { return p.rinertia; } +#else + constexpr auto &rinertia() const { return p.rinertia; } +#endif +#ifdef ELECTROSTATICS + auto const &q() const { return p.q; } + auto &q() { return p.q; } #else - throw std::runtime_error{"Exclusions not enabled."}; + constexpr auto &q() const { return p.q; } +#endif +#ifdef LB_ELECTROHYDRODYNAMICS + auto const &mu_E() const { return p.mu_E; } + auto &mu_E() { return p.mu_E; } #endif +#ifdef VIRTUAL_SITES + bool is_virtual() const { return p.is_virtual; } + void set_virtual(bool const virt_flag) { p.is_virtual = virt_flag; } +#ifdef VIRTUAL_SITES_RELATIVE + auto const &vs_relative() const { return p.vs_relative; } + auto &vs_relative() { return p.vs_relative; } +#endif // VIRTUAL_SITES_RELATIVE +#else + constexpr auto is_virtual() const { return p.is_virtual; } +#endif +#ifdef THERMOSTAT_PER_PARTICLE + auto const &gamma() const { return p.gamma; } + auto &gamma() { return p.gamma; } +#ifdef ROTATION + auto const &gamma_rot() const { return p.gamma_rot; } + auto &gamma_rot() { return p.gamma_rot; } +#endif // ROTATION +#endif // THERMOSTAT_PER_PARTICLE +#ifdef EXTERNAL_FORCES + bool has_fixed_coordinates() const { + return detail::get_nth_bit(p.ext_flag, 0); + } + bool is_fixed_along(int const axis) const { + detail::check_axis_idx_valid(axis); + return detail::get_nth_bit(p.ext_flag, axis + 1); + } + void set_fixed_along(int const axis, bool const fixed_flag) { + // set new flag + if (fixed_flag) { + p.ext_flag |= static_cast(1u << (axis + 1)); + } else { + p.ext_flag &= static_cast(~(1u << (axis + 1))); + } + // check if any flag is set and store that in the 0th bit + if (p.ext_flag >> 1) { + p.ext_flag |= static_cast(1u); + } else { + p.ext_flag &= static_cast(~1u); + } } + auto const &ext_force() const { return p.ext_force; } + auto &ext_force() { return p.ext_force; } + +#else // EXTERNAL_FORCES + constexpr bool has_fixed_coordinates() const { return false; } + constexpr bool is_fixed_along(int const axis) const { return false; } +#endif // EXTERNAL_FORCES +#ifdef ENGINE + auto const &swimming() const { return p.swim; } + auto &swimming() { return p.swim; } +#endif +#ifdef BOND_CONSTRAINT + auto const &pos_last_time_step() const { return r.p_last_timestep; } + auto &pos_last_time_step() { return r.p_last_timestep; } + auto const &rattle_params() const { return rattle; } + auto &rattle_params() { return rattle; } +#endif +#ifdef EXCLUSIONS + auto const &exclusions() const { return el; } + 
auto &exclusions() { return el; } +#endif private: friend boost::serialization::access; diff --git a/src/core/DomainDecomposition.cpp b/src/core/RegularDecomposition.cpp similarity index 86% rename from src/core/DomainDecomposition.cpp rename to src/core/RegularDecomposition.cpp index 9d65a01f0cd..322d585018a 100644 --- a/src/core/DomainDecomposition.cpp +++ b/src/core/RegularDecomposition.cpp @@ -19,7 +19,7 @@ * along with this program. If not, see . */ -#include "DomainDecomposition.hpp" +#include "RegularDecomposition.hpp" #include "RuntimeErrorStream.hpp" #include "errorhandling.hpp" @@ -45,7 +45,7 @@ /** Returns pointer to the cell which corresponds to the position if the * position is in the nodes spatial domain otherwise a nullptr pointer. */ -Cell *DomainDecomposition::position_to_cell(const Utils::Vector3d &pos) { +Cell *RegularDecomposition::position_to_cell(const Utils::Vector3d &pos) { Utils::Vector3i cpos; for (int i = 0; i < 3; i++) { @@ -76,7 +76,7 @@ Cell *DomainDecomposition::position_to_cell(const Utils::Vector3d &pos) { return &(cells.at(ind)); } -void DomainDecomposition::move_if_local( +void RegularDecomposition::move_if_local( ParticleList &src, ParticleList &rest, std::vector &modified_cells) { for (auto &part : src) { @@ -93,10 +93,10 @@ void DomainDecomposition::move_if_local( src.clear(); } -void DomainDecomposition::move_left_or_right(ParticleList &src, - ParticleList &left, - ParticleList &right, - int dir) const { +void RegularDecomposition::move_left_or_right(ParticleList &src, + ParticleList &left, + ParticleList &right, + int dir) const { for (auto it = src.begin(); it != src.end();) { if ((m_box.get_mi_coord(it->r.p[dir], m_local_box.my_left()[dir], dir) < 0.0) and @@ -115,7 +115,7 @@ void DomainDecomposition::move_left_or_right(ParticleList &src, } } -void DomainDecomposition::exchange_neighbors( +void RegularDecomposition::exchange_neighbors( ParticleList &pl, std::vector &modified_cells) { auto const node_neighbors = Utils::Mpi::cart_neighbors<3>(m_comm); static ParticleList send_buf_l, send_buf_r, recv_buf_l, recv_buf_r; @@ -168,8 +168,8 @@ void fold_and_reset(Particle &p, BoxGeometry const &box_geo) { } } // namespace -void DomainDecomposition::resort(bool global, - std::vector &diff) { +void RegularDecomposition::resort(bool global, + std::vector &diff) { ParticleList displaced_parts; for (auto &c : local_cells()) { @@ -234,12 +234,11 @@ void DomainDecomposition::resort(bool global, } } -void DomainDecomposition::mark_cells() { - int cnt_c = 0; - +void RegularDecomposition::mark_cells() { m_local_cells.clear(); m_ghost_cells.clear(); + int cnt_c = 0; for (int o = 0; o < ghost_cell_grid[2]; o++) for (int n = 0; n < ghost_cell_grid[1]; n++) for (int m = 0; m < ghost_cell_grid[0]; m++) { @@ -250,9 +249,10 @@ void DomainDecomposition::mark_cells() { m_ghost_cells.push_back(&cells.at(cnt_c++)); } } -void DomainDecomposition::fill_comm_cell_lists(ParticleList **part_lists, - const Utils::Vector3i &lc, - const Utils::Vector3i &hc) { + +void RegularDecomposition::fill_comm_cell_lists(ParticleList **part_lists, + const Utils::Vector3i &lc, + const Utils::Vector3i &hc) { for (int o = lc[0]; o <= hc[0]; o++) for (int n = lc[1]; n <= hc[1]; n++) for (int m = lc[2]; m <= hc[2]; m++) { @@ -261,7 +261,7 @@ void DomainDecomposition::fill_comm_cell_lists(ParticleList **part_lists, *part_lists++ = &(cells.at(i).particles()); } } -Utils::Vector3d DomainDecomposition::max_cutoff() const { +Utils::Vector3d RegularDecomposition::max_cutoff() const { auto dir_max_range = 
[this](int i) { return std::min(0.5 * m_box.length()[i], m_local_box.length()[i]); }; @@ -269,8 +269,8 @@ Utils::Vector3d DomainDecomposition::max_cutoff() const { return {dir_max_range(0), dir_max_range(1), dir_max_range(2)}; } -Utils::Vector3d DomainDecomposition::max_range() const { return cell_size; } -int DomainDecomposition::calc_processor_min_num_cells() const { +Utils::Vector3d RegularDecomposition::max_range() const { return cell_size; } +int RegularDecomposition::calc_processor_min_num_cells() const { /* the minimal number of cells can be lower if there are at least two nodes serving a direction, since this also ensures that the cell size is at most half the box @@ -282,46 +282,41 @@ int DomainDecomposition::calc_processor_min_num_cells() const { }); } -void DomainDecomposition::create_cell_grid(double range) { +void RegularDecomposition::create_cell_grid(double range) { auto const cart_info = Utils::Mpi::cart_get<3>(m_comm); int n_local_cells; - double cell_range[3]; - - /* initialize */ - cell_range[0] = cell_range[1] = cell_range[2] = range; - - /* Min num cells can not be smaller than calc_processor_min_num_cells. */ - int min_num_cells = calc_processor_min_num_cells(); + auto cell_range = Utils::Vector3d::broadcast(range); + auto const min_num_cells = calc_processor_min_num_cells(); if (range <= 0.) { /* this is the non-interacting case */ auto const cells_per_dir = - static_cast(std::ceil(std::pow(min_num_cells, 1. / 3.))); + static_cast(std::ceil(std::cbrt(min_num_cells))); cell_grid = Utils::Vector3i::broadcast(cells_per_dir); n_local_cells = Utils::product(cell_grid); } else { /* Calculate initial cell grid */ - double volume = m_local_box.length()[0]; - for (int i = 1; i < 3; i++) - volume *= m_local_box.length()[i]; - double scale = pow(DomainDecomposition::max_num_cells / volume, 1. 
/ 3.); + auto const &local_box_l = m_local_box.length(); + auto const volume = Utils::product(local_box_l); + auto const scale = std::cbrt(RegularDecomposition::max_num_cells / volume); + for (int i = 0; i < 3; i++) { /* this is at least 1 */ - cell_grid[i] = (int)ceil(m_local_box.length()[i] * scale); - cell_range[i] = m_local_box.length()[i] / cell_grid[i]; + cell_grid[i] = static_cast(std::ceil(local_box_l[i] * scale)); + cell_range[i] = local_box_l[i] / static_cast(cell_grid[i]); if (cell_range[i] < range) { /* ok, too many cells for this direction, set to minimum */ - cell_grid[i] = (int)floor(m_local_box.length()[i] / range); + cell_grid[i] = static_cast(std::floor(local_box_l[i] / range)); if (cell_grid[i] < 1) { - runtimeErrorMsg() << "interaction range " << range << " in direction " - << i << " is larger than the local box size " - << m_local_box.length()[i]; + runtimeErrorMsg() + << "interaction range " << range << " in direction " << i + << " is larger than the local box size " << local_box_l[i]; cell_grid[i] = 1; } - cell_range[i] = m_local_box.length()[i] / cell_grid[i]; + cell_range[i] = local_box_l[i] / static_cast(cell_grid[i]); } } @@ -333,7 +328,7 @@ void DomainDecomposition::create_cell_grid(double range) { n_local_cells = Utils::product(cell_grid); /* done */ - if (n_local_cells <= DomainDecomposition::max_num_cells) + if (n_local_cells <= RegularDecomposition::max_num_cells) break; /* find coordinate with the smallest cell range */ @@ -360,9 +355,8 @@ void DomainDecomposition::create_cell_grid(double range) { } } - /* quit program if unsuccessful */ - if (n_local_cells > DomainDecomposition::max_num_cells) { - runtimeErrorMsg() << "no suitable cell grid found "; + if (n_local_cells > RegularDecomposition::max_num_cells) { + runtimeErrorMsg() << "no suitable cell grid found"; } auto const node_pos = cart_info.coords; @@ -384,7 +378,7 @@ void DomainDecomposition::create_cell_grid(double range) { m_ghost_cells.resize(new_cells - n_local_cells); } -void DomainDecomposition::init_cell_interactions() { +void RegularDecomposition::init_cell_interactions() { /* loop all local cells */ for (int o = 1; o < cell_grid[2] + 1; o++) for (int n = 1; n < cell_grid[1] + 1; n++) @@ -463,7 +457,7 @@ Utils::Vector3d shift(BoxGeometry const &box, LocalBox const &local_box, } } // namespace -GhostCommunicator DomainDecomposition::prepare_comm() { +GhostCommunicator RegularDecomposition::prepare_comm() { int dir, lr, i, cnt, n_comm_cells[3]; Utils::Vector3i lc{}, hc{}, done{}; @@ -566,12 +560,12 @@ GhostCommunicator DomainDecomposition::prepare_comm() { return ghost_comm; } -DomainDecomposition::DomainDecomposition(boost::mpi::communicator comm, - double range, - const BoxGeometry &box_geo, - const LocalBox &local_geo) +RegularDecomposition::RegularDecomposition(boost::mpi::communicator comm, + double range, + const BoxGeometry &box_geo, + const LocalBox &local_geo) : m_comm(std::move(comm)), m_box(box_geo), m_local_box(local_geo) { - /* set up new domain decomposition cell structure */ + /* set up new regular decomposition cell structure */ create_cell_grid(range); /* setup cell neighbors */ diff --git a/src/core/DomainDecomposition.hpp b/src/core/RegularDecomposition.hpp similarity index 91% rename from src/core/DomainDecomposition.hpp rename to src/core/RegularDecomposition.hpp index 747f19af010..765d4ffbeaf 100644 --- a/src/core/DomainDecomposition.hpp +++ b/src/core/RegularDecomposition.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . 
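As a reading aid for the create_cell_grid() changes above: the heuristic first scales the local box so that it holds roughly max_num_cells cells, then enlarges the cells in any direction where they would otherwise fall below the interaction range. Below is a minimal standalone sketch of that arithmetic; choose_cell_grid() and the plain std::array types are illustrative stand-ins, and the runtime-error path of the original is reduced to a clamp.

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdio>

// Simplified stand-in for the cell-grid heuristic sketched in the diff above;
// not part of the ESPResSo code base.
std::array<int, 3> choose_cell_grid(std::array<double, 3> const &local_box_l,
                                    double range, double max_num_cells) {
  std::array<int, 3> cell_grid{};
  auto const volume = local_box_l[0] * local_box_l[1] * local_box_l[2];
  // aim for roughly max_num_cells cells on this MPI rank
  auto const scale = std::cbrt(max_num_cells / volume);
  for (int i = 0; i < 3; i++) {
    // at least one cell per direction
    cell_grid[i] =
        std::max(1, static_cast<int>(std::ceil(local_box_l[i] * scale)));
    auto const cell_range = local_box_l[i] / static_cast<double>(cell_grid[i]);
    if (cell_range < range) {
      // too many cells: a cell may not be smaller than the interaction range
      cell_grid[i] =
          std::max(1, static_cast<int>(std::floor(local_box_l[i] / range)));
    }
  }
  return cell_grid;
}

int main() {
  auto const grid = choose_cell_grid({10., 10., 5.}, 1.2, 512.);
  std::printf("cell grid: %d x %d x %d\n", grid[0], grid[1], grid[2]);
}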
*/ -#ifndef ESPRESSO_DOMAIN_DECOMPOSITION_HPP -#define ESPRESSO_DOMAIN_DECOMPOSITION_HPP +#ifndef ESPRESSO_REGULAR_DECOMPOSITION_HPP +#define ESPRESSO_REGULAR_DECOMPOSITION_HPP #include "ParticleDecomposition.hpp" @@ -40,13 +40,13 @@ #include /** @brief Structure containing the information about the cell grid used for - * domain decomposition. + * regular decomposition. * * The domain of a node is split into a 3D cell grid with dimension * cell_grid. Together with one ghost cell * layer on each side the overall dimension of the ghost cell grid is - * ghost_cell_grid. The domain - * decomposition enables one the use of the linked cell algorithm + * ghost_cell_grid. The regular + * decomposition enables the use of the linked cell algorithm * which is in turn used for setting up the Verlet list for the * system. You can see a 2D graphical representation of the linked * cell grid below. @@ -65,7 +65,7 @@ * some ghost-ghost cell interaction as well, which we do not need! * */ -struct DomainDecomposition : public ParticleDecomposition { +struct RegularDecomposition : public ParticleDecomposition { /** Grid dimensions per node. */ Utils::Vector3i cell_grid = {}; /** Cell size. */ @@ -74,7 +74,7 @@ struct DomainDecomposition : public ParticleDecomposition { Utils::Vector3i cell_offset = {}; /** linked cell grid with ghost frame. */ Utils::Vector3i ghost_cell_grid = {}; - /** inverse cell size = \see DomainDecomposition::cell_size ^ -1. */ + /** inverse cell size = \see RegularDecomposition::cell_size ^ -1. */ Utils::Vector3d inv_cell_size = {}; boost::mpi::communicator m_comm; @@ -87,16 +87,16 @@ struct DomainDecomposition : public ParticleDecomposition { GhostCommunicator m_collect_ghost_force_comm; public: - DomainDecomposition(boost::mpi::communicator comm, double range, - const BoxGeometry &box_geo, - const LocalBox &local_geo); + RegularDecomposition(boost::mpi::communicator comm, double range, + const BoxGeometry &box_geo, + const LocalBox &local_geo); GhostCommunicator const &exchange_ghosts_comm() const override { return m_exchange_ghosts_comm; } GhostCommunicator const &collect_ghost_force_comm() const override { return m_collect_ghost_force_comm; - }; + } Utils::Span local_cells() override { return Utils::make_span(m_local_cells); @@ -118,7 +118,7 @@ struct DomainDecomposition : public ParticleDecomposition { } private: - /** Fill @c m_local_cells list and @c m_ghost_cells list for use with domain + /** Fill @c m_local_cells list and @c m_ghost_cells list for use with regular * decomposition. */ void mark_cells(); @@ -196,14 +196,14 @@ struct DomainDecomposition : public ParticleDecomposition { */ void create_cell_grid(double range); - /** Init cell interactions for cell system domain decomposition. + /** Init cell interactions for cell system regular decomposition. * Initializes the interacting neighbor cell list of a cell. * This list of interacting neighbor cells is used by the Verlet * algorithm. */ void init_cell_interactions(); - /** Create communicators for cell structure domain decomposition (see \ref + /** Create communicators for cell structure regular decomposition (see \ref * GhostCommunicator). 
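To make the docstring above more concrete: with one ghost layer on each side, looking up the cell of a position reduces to a shift into the ghost frame followed by a row-major linear index, as sketched below. cell_index() is a hypothetical helper; the real position_to_cell() additionally handles particles sitting exactly on the domain boundary and returns nullptr for positions outside the local domain.

#include <array>
#include <cmath>
#include <cstdio>

// Illustrative reimplementation of the position-to-cell lookup described
// above; not ESPResSo API.
int cell_index(std::array<double, 3> const &pos,
               std::array<double, 3> const &my_left,
               std::array<double, 3> const &inv_cell_size,
               std::array<int, 3> const &cell_grid) {
  std::array<int, 3> cpos{};
  std::array<int, 3> ghost_cell_grid{};
  for (int i = 0; i < 3; i++) {
    ghost_cell_grid[i] = cell_grid[i] + 2; // one ghost layer on each side
    // shift by +1 so that index 0 is the lower ghost layer
    cpos[i] = static_cast<int>(
                  std::floor((pos[i] - my_left[i]) * inv_cell_size[i])) +
              1;
  }
  // row-major linear index into the ghost cell grid
  return cpos[0] +
         ghost_cell_grid[0] * (cpos[1] + ghost_cell_grid[1] * cpos[2]);
}

int main() {
  auto const ind =
      cell_index({1.3, 0.2, 2.9}, {0., 0., 0.}, {1., 1., 1.}, {4, 4, 4});
  std::printf("linear cell index: %d\n", ind);
}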
*/ GhostCommunicator prepare_comm(); diff --git a/src/core/RuntimeErrorCollector.cpp b/src/core/RuntimeErrorCollector.cpp index ef5f3f68088..b08a463c58c 100644 --- a/src/core/RuntimeErrorCollector.cpp +++ b/src/core/RuntimeErrorCollector.cpp @@ -30,12 +30,9 @@ #include #include -using boost::mpi::all_reduce; -using boost::mpi::communicator; - namespace ErrorHandling { -RuntimeErrorCollector::RuntimeErrorCollector(communicator comm) +RuntimeErrorCollector::RuntimeErrorCollector(boost::mpi::communicator comm) : m_comm(std::move(comm)) {} RuntimeErrorCollector::~RuntimeErrorCollector() { @@ -99,18 +96,14 @@ void RuntimeErrorCollector::error(const std::ostringstream &mstr, } int RuntimeErrorCollector::count() const { - int totalMessages; - const int numMessages = m_errors.size(); - - all_reduce(m_comm, numMessages, totalMessages, std::plus()); - - return totalMessages; + return boost::mpi::all_reduce(m_comm, static_cast(m_errors.size()), + std::plus<>()); } int RuntimeErrorCollector::count(RuntimeError::ErrorLevel level) { - return std::count_if( + return static_cast(std::count_if( m_errors.begin(), m_errors.end(), - [level](const RuntimeError &e) { return e.level() >= level; }); + [level](const RuntimeError &e) { return e.level() >= level; })); } void RuntimeErrorCollector::clear() { m_errors.clear(); } diff --git a/src/core/SystemInterface.hpp b/src/core/SystemInterface.hpp index 15b79825362..0a1abd0dec6 100644 --- a/src/core/SystemInterface.hpp +++ b/src/core/SystemInterface.hpp @@ -98,7 +98,7 @@ class SystemInterface { virtual Utils::Vector3d box() const = 0; private: - std::string error_message(std::string property) const { + std::string error_message(std::string const &property) const { return "No GPU available or particle " + property + " not compiled in."; } }; diff --git a/src/core/accumulators/AccumulatorBase.hpp b/src/core/accumulators/AccumulatorBase.hpp index 7c311c00818..1edebb7bd59 100644 --- a/src/core/accumulators/AccumulatorBase.hpp +++ b/src/core/accumulators/AccumulatorBase.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
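The RuntimeErrorCollector::count() rewrite above replaces the out-parameter form of all_reduce with the value-returning overload and a transparent std::plus<>. A minimal sketch of that call pattern outside the collector class; the toy program is illustrative and needs an MPI launcher to run on several ranks.

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <functional>
#include <iostream>

int main(int argc, char **argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  // pretend every rank collected one local error message
  int const local_count = 1;
  // value-returning overload: every rank receives the global sum
  auto const total =
      boost::mpi::all_reduce(world, local_count, std::plus<>());
  if (world.rank() == 0)
    std::cout << "total messages: " << total << "\n";
}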
*/ -#ifndef CORE_ACCUMULATORS_ACCUMULATORBASE -#define CORE_ACCUMULATORS_ACCUMULATORBASE +#ifndef CORE_ACCUMULATORS_ACCUMULATOR_BASE_HPP +#define CORE_ACCUMULATORS_ACCUMULATOR_BASE_HPP #include #include @@ -26,10 +26,11 @@ namespace Accumulators { class AccumulatorBase { public: - explicit AccumulatorBase(int delta_N = 1) : m_delta_N(delta_N){}; - int &delta_N() { return m_delta_N; }; + explicit AccumulatorBase(int delta_N = 1) : m_delta_N(delta_N) {} virtual ~AccumulatorBase() = default; + int &delta_N() { return m_delta_N; } + virtual void update() = 0; /** Dimensions needed to reshape the flat array returned by the accumulator */ virtual std::vector shape() const = 0; diff --git a/src/core/accumulators/Correlator.cpp b/src/core/accumulators/Correlator.cpp index 90e73e659b0..326827edcfd 100644 --- a/src/core/accumulators/Correlator.cpp +++ b/src/core/accumulators/Correlator.cpp @@ -85,8 +85,8 @@ std::vector scalar_product(std::vector const &A, "Error in scalar product: The vector sizes do not match"); } - return std::vector( - 1, std::inner_product(A.begin(), A.end(), B.begin(), 0.0)); + auto const result = std::inner_product(A.begin(), A.end(), B.begin(), 0.0); + return {result}; } std::vector componentwise_product(std::vector const &A, @@ -284,7 +284,7 @@ void Correlator::initialize() { auto const n_result = n_values(); n_sweeps = std::vector(n_result, 0); - n_vals = std::vector(m_hierarchy_depth, 0); + n_vals = std::vector(m_hierarchy_depth, 0); result.resize(std::array{{n_result, m_dim_corr}}); @@ -295,7 +295,7 @@ void Correlator::initialize() { } } - newest = std::vector(m_hierarchy_depth, m_tau_lin); + newest = std::vector(m_hierarchy_depth, m_tau_lin); tau.resize(n_result); for (int i = 0; i < m_tau_lin + 1; i++) { @@ -324,17 +324,21 @@ void Correlator::update() { // Let's find out how far we have to go back in the hierarchy to make space // for the new value - int i = 0; - while (true) { - if (((t - ((m_tau_lin + 1) * ((1 << (i + 1)) - 1) + 1)) % (1 << (i + 1)) == - 0)) { - if (i < (m_hierarchy_depth - 1) && n_vals[i] > m_tau_lin) { - highest_level_to_compress += 1; - i++; - } else + { + auto const max_depth = m_hierarchy_depth - 1; + int i = 0; + while (true) { + if (i >= max_depth or n_vals[i] <= m_tau_lin) { break; - } else - break; + } + auto const modulo = 1 << (i + 1); + auto const remainder = (t - (m_tau_lin + 1) * (modulo - 1) - 1) % modulo; + if (remainder != 0) { + break; + } + highest_level_to_compress += 1; + i++; + } } // Now we know we must make space on the levels 0..highest_level_to_compress @@ -374,7 +378,7 @@ void Correlator::update() { } // Now update the lowest level correlation estimates - for (unsigned j = 0; j < min(m_tau_lin + 1, n_vals[0]); j++) { + for (long j = 0; j < min(m_tau_lin + 1, n_vals[0]); j++) { auto const index_new = newest[0]; auto const index_old = (newest[0] - j + m_tau_lin + 1) % (m_tau_lin + 1); auto const temp = @@ -388,8 +392,8 @@ void Correlator::update() { } // Now for the higher ones for (int i = 1; i < highest_level_to_compress + 2; i++) { - for (unsigned j = (m_tau_lin + 1) / 2 + 1; - j < min(m_tau_lin + 1, n_vals[i]); j++) { + for (long j = (m_tau_lin + 1) / 2 + 1; j < min(m_tau_lin + 1, n_vals[i]); + j++) { auto const index_new = newest[i]; auto const index_old = (newest[i] - j + m_tau_lin + 1) % (m_tau_lin + 1); auto const index_res = @@ -418,32 +422,27 @@ int Correlator::finalize() { finalized = true; for (int ll = 0; ll < m_hierarchy_depth - 1; ll++) { - int vals_ll; // number of values remaining in the lowest level + long 
vals_ll; // number of values remaining in the lowest level if (n_vals[ll] > m_tau_lin + 1) - vals_ll = m_tau_lin + static_cast(n_vals[ll]) % 2; + vals_ll = m_tau_lin + n_vals[ll] % 2; else vals_ll = n_vals[ll]; while (vals_ll) { // Check, if we will want to push the value from the lowest level - int highest_level_to_compress = -1; - if (vals_ll % 2) { - highest_level_to_compress = ll; - } + auto highest_level_to_compress = (vals_ll % 2) ? ll : -1; - int i = ll + 1; // lowest level for which we have to check for compression // Let's find out how far we have to go back in the hierarchy to make // space for the new value - while (highest_level_to_compress > -1) { - if (n_vals[i] % 2) { - if (i < (m_hierarchy_depth - 1) && n_vals[i] > m_tau_lin) { - highest_level_to_compress += 1; - i++; - } else { + { + auto const max_depth = m_hierarchy_depth - 1; + int i = ll + 1; // lowest level for which to check for compression + while (highest_level_to_compress > -1) { + if (i >= max_depth or n_vals[i] % 2 == 0 or n_vals[i] <= m_tau_lin) { break; } - } else { - break; + highest_level_to_compress += 1; + i++; } } vals_ll -= 1; @@ -467,8 +466,8 @@ int Correlator::finalize() { // We only need to update correlation estimates for the higher levels for (int i = ll + 1; i < highest_level_to_compress + 2; i++) { - for (int j = (m_tau_lin + 1) / 2 + 1; j < min(m_tau_lin + 1, n_vals[i]); - j++) { + for (long j = (m_tau_lin + 1) / 2 + 1; + j < min(m_tau_lin + 1, n_vals[i]); j++) { auto const index_new = newest[i]; auto const index_old = (newest[i] - j + m_tau_lin + 1) % (m_tau_lin + 1); diff --git a/src/core/accumulators/Correlator.hpp b/src/core/accumulators/Correlator.hpp index d67ec5dbb96..c69aeed74fd 100644 --- a/src/core/accumulators/Correlator.hpp +++ b/src/core/accumulators/Correlator.hpp @@ -16,6 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef CORE_ACCUMULATORS_CORRELATOR_HPP +#define CORE_ACCUMULATORS_CORRELATOR_HPP /** @file * * This module computes correlations (and other two time averages) on @@ -97,8 +99,6 @@ * the topology concept * - Write a destructor */ -#ifndef _STATISTICS_CORRELATION_H -#define _STATISTICS_CORRELATION_H #include "AccumulatorBase.hpp" #include "integrate.hpp" @@ -195,7 +195,7 @@ class Correlator : public AccumulatorBase { return shape; } std::vector get_samples_sizes() const { - return std::vector(n_sweeps.begin(), n_sweeps.end()); + return {n_sweeps.begin(), n_sweeps.end()}; } std::vector get_lag_times() const; @@ -250,9 +250,9 @@ class Correlator : public AccumulatorBase { /// number of correlation sweeps at a particular value of tau std::vector n_sweeps; /// number of data values already present at a particular value of tau - std::vector n_vals; + std::vector n_vals; /// index of the newest entry in each hierarchy level - std::vector newest; + std::vector newest; std::vector A_accumulated_average; ///< all A values are added up here std::vector B_accumulated_average; ///< all B values are added up here diff --git a/src/core/accumulators/MeanVarianceCalculator.hpp b/src/core/accumulators/MeanVarianceCalculator.hpp index 5a3ce3e8617..f65c1812ae2 100644 --- a/src/core/accumulators/MeanVarianceCalculator.hpp +++ b/src/core/accumulators/MeanVarianceCalculator.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
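For readers trying to follow the restructured loop in Correlator::update() above: level i of the multiple-tau hierarchy only receives a new compressed value every 2^(i+1) time steps, and only once it already holds more than tau_lin entries, so the loop counts upward until one of those conditions fails. The sketch below reproduces that stopping rule with made-up state; levels_to_compress() is not an ESPResSo function, and note that finalize() uses a parity check on n_vals instead of the time modulo.

#include <cstdio>
#include <vector>

// Illustrative version of the level-compression test from Correlator::update().
int levels_to_compress(long t, int tau_lin, std::vector<long> const &n_vals) {
  int highest = -1; // -1 means: no level needs compression
  auto const max_depth = static_cast<int>(n_vals.size()) - 1;
  for (int i = 0; i < max_depth && n_vals[i] > tau_lin; ++i) {
    // level i feeds level i+1 only every 2^(i+1) time steps
    auto const modulo = 1L << (i + 1);
    auto const remainder = (t - (tau_lin + 1) * (modulo - 1) - 1) % modulo;
    if (remainder != 0)
      break;
    ++highest;
  }
  return highest;
}

int main() {
  std::vector<long> const n_vals = {20, 20, 5}; // samples per level (made up)
  std::printf("compress levels 0..%d\n", levels_to_compress(34, 16, n_vals));
}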
*/ -#ifndef _ACCUMULATORS_ACCUMULATOR_H -#define _ACCUMULATORS_ACCUMULATOR_H +#ifndef CORE_ACCUMULATORS_MEAN_VARIANCE_CALCULATOR_HPP +#define CORE_ACCUMULATORS_MEAN_VARIANCE_CALCULATOR_HPP #include "AccumulatorBase.hpp" #include "observables/Observable.hpp" diff --git a/src/core/actor/Actor.hpp b/src/core/actor/Actor.hpp index cf9a0ba61be..e866eec957c 100644 --- a/src/core/actor/Actor.hpp +++ b/src/core/actor/Actor.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _ACTOR_ACTOR_HPP -#define _ACTOR_ACTOR_HPP +#ifndef CORE_ACTOR_ACTOR_HPP +#define CORE_ACTOR_ACTOR_HPP #include "SystemInterface.hpp" @@ -27,10 +27,10 @@ */ class Actor { public: - virtual void computeForces(SystemInterface &s){}; - virtual void computeTorques(SystemInterface &s){}; - virtual void computeEnergy(SystemInterface &s){}; + virtual void computeForces(SystemInterface &) {} + virtual void computeTorques(SystemInterface &) {} + virtual void computeEnergy(SystemInterface &) {} virtual ~Actor() = default; }; -#endif /* _ACTOR_ACTOR_HPP */ +#endif /* CORE_ACTOR_ACTOR_HPP */ diff --git a/src/core/actor/ActorList.hpp b/src/core/actor/ActorList.hpp index 3de187d9491..d004069b912 100644 --- a/src/core/actor/ActorList.hpp +++ b/src/core/actor/ActorList.hpp @@ -16,10 +16,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _ACTOR_ACTORLIST_HPP -#define _ACTOR_ACTORLIST_HPP +#ifndef CORE_ACTOR_ACTORLIST_HPP +#define CORE_ACTOR_ACTORLIST_HPP #include "Actor.hpp" + #include class ActorList : public std::vector { @@ -28,4 +29,4 @@ class ActorList : public std::vector { void remove(Actor *actor); }; -#endif /* _ACTOR_ACTORLIST_HPP */ +#endif /* CORE_ACTOR_ACTORLIST_HPP */ diff --git a/src/core/actor/Mmm1dgpuForce.cpp b/src/core/actor/Mmm1dgpuForce.cpp index d5695ceede6..1a7e418a05e 100644 --- a/src/core/actor/Mmm1dgpuForce.cpp +++ b/src/core/actor/Mmm1dgpuForce.cpp @@ -25,7 +25,7 @@ #include "electrostatics_magnetostatics/common.hpp" #include "electrostatics_magnetostatics/coulomb.hpp" -#include "cells.hpp" +#include "CellStructureType.hpp" #include "energy.hpp" #include "forces.hpp" #include "grid.hpp" @@ -41,7 +41,8 @@ Mmm1dgpuForce::Mmm1dgpuForce(SystemInterface &s) { if (box_geo.periodic(0) || box_geo.periodic(1) || !box_geo.periodic(2)) { throw std::runtime_error("MMM1D requires periodicity (0, 0, 1)"); } - if (cell_structure.decomposition_type() != CELL_STRUCTURE_NSQUARE) { + if (local_geo.cell_structure_type() != + CellStructureType::CELL_STRUCTURE_NSQUARE) { throw std::runtime_error("MMM1D requires the N-square cellsystem"); } diff --git a/src/core/bonded_interactions/bonded_coulomb.hpp b/src/core/bonded_interactions/bonded_coulomb.hpp index 472fb3f0b46..7f1957d5ed4 100644 --- a/src/core/bonded_interactions/bonded_coulomb.hpp +++ b/src/core/bonded_interactions/bonded_coulomb.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _BONDED_COULOMB_HPP -#define _BONDED_COULOMB_HPP +#ifndef CORE_BN_IA_BONDED_COULOMB_HPP +#define CORE_BN_IA_BONDED_COULOMB_HPP /** \file * Routines to calculate the bonded Coulomb potential between * particle pairs. 
diff --git a/src/core/bonded_interactions/bonded_coulomb_sr.hpp b/src/core/bonded_interactions/bonded_coulomb_sr.hpp index ccd647f2c3d..ef891794727 100644 --- a/src/core/bonded_interactions/bonded_coulomb_sr.hpp +++ b/src/core/bonded_interactions/bonded_coulomb_sr.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _BONDED_COULOMB_SR_HPP -#define _BONDED_COULOMB_SR_HPP +#ifndef CORE_BN_IA_BONDED_COULOMB_SR_HPP +#define CORE_BN_IA_BONDED_COULOMB_SR_HPP /** \file * Routines to calculate the short-range part of the bonded Coulomb potential * between particle pairs. Can be used to subtract certain intramolecular @@ -82,11 +82,10 @@ inline boost::optional BondedCoulombSR::energy(Particle const &p1, Particle const &p2, Utils::Vector3d const &dx) const { #ifdef ELECTROSTATICS - auto const dist2 = dx.norm2(); - auto const dist = sqrt(dist2); - return Coulomb::pair_energy(p1, p2, q1q2, dx, dist, dist2); + auto const dist = dx.norm(); + return Coulomb::pair_energy(p1, p2, q1q2, dx, dist); #else - return .0; + return 0.; #endif } diff --git a/src/core/bonded_interactions/bonded_interaction_data.hpp b/src/core/bonded_interactions/bonded_interaction_data.hpp index 0b864ad8fa7..da4d51679d4 100644 --- a/src/core/bonded_interactions/bonded_interaction_data.hpp +++ b/src/core/bonded_interactions/bonded_interaction_data.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _BONDED_INTERACTION_DATA_HPP -#define _BONDED_INTERACTION_DATA_HPP +#ifndef CORE_BN_IA_BONDED_INTERACTION_DATA_HPP +#define CORE_BN_IA_BONDED_INTERACTION_DATA_HPP /** @file * Data structures for bonded interactions. * For more information on how to add new interactions, see @ref bondedIA_new. @@ -67,8 +67,7 @@ struct NoneBond { private: friend boost::serialization::access; - template - void serialize(Archive &ar, long int /* version */) {} + template void serialize(Archive &, long int) {} }; /** Interaction type for virtual bonds */ @@ -78,8 +77,7 @@ struct VirtualBond { private: friend boost::serialization::access; - template - void serialize(Archive &ar, long int /* version */) {} + template void serialize(Archive &, long int) {} }; /** Visitor to get the number of bound partners from the bond parameter diff --git a/src/core/bonded_interactions/bonded_interaction_utils.hpp b/src/core/bonded_interactions/bonded_interaction_utils.hpp index ada4adbeb88..f21ec152722 100644 --- a/src/core/bonded_interactions/bonded_interaction_utils.hpp +++ b/src/core/bonded_interactions/bonded_interaction_utils.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _BONDED_INTERACTION_UTILS_HPP -#define _BONDED_INTERACTION_UTILS_HPP +#ifndef CORE_BN_IA_BONDED_INTERACTION_UTILS_HPP +#define CORE_BN_IA_BONDED_INTERACTION_UTILS_HPP #include "bonded_interaction_data.hpp" diff --git a/src/core/bonded_interactions/fene.hpp b/src/core/bonded_interactions/fene.hpp index f3dddbc58b3..a18f34322b6 100644 --- a/src/core/bonded_interactions/fene.hpp +++ b/src/core/bonded_interactions/fene.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _FENE_HPP -#define _FENE_HPP +#ifndef CORE_BN_IA_FENE_HPP +#define CORE_BN_IA_FENE_HPP /** \file * Routines to calculate the FENE potential between particle pairs. 
* diff --git a/src/core/bonded_interactions/harmonic.hpp b/src/core/bonded_interactions/harmonic.hpp index e1018f7460f..77f8a4e06cb 100644 --- a/src/core/bonded_interactions/harmonic.hpp +++ b/src/core/bonded_interactions/harmonic.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _HARMONIC_HPP -#define _HARMONIC_HPP +#ifndef CORE_BN_IA_HARMONIC_HPP +#define CORE_BN_IA_HARMONIC_HPP /** \file * Routines to calculate the harmonic bond potential between particle pairs. */ diff --git a/src/core/bonded_interactions/quartic.hpp b/src/core/bonded_interactions/quartic.hpp index c461658e064..86b777305fd 100644 --- a/src/core/bonded_interactions/quartic.hpp +++ b/src/core/bonded_interactions/quartic.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _QUARTIC_HPP -#define _QUARTIC_HPP +#ifndef CORE_BN_IA_QUARTIC_HPP +#define CORE_BN_IA_QUARTIC_HPP /** \file * Routines to calculate the quartic potential between particle pairs. */ diff --git a/src/core/cells.cpp b/src/core/cells.cpp index ffc74396290..ef9151d965a 100644 --- a/src/core/cells.cpp +++ b/src/core/cells.cpp @@ -34,8 +34,8 @@ #include "integrate.hpp" #include "particle_data.hpp" -#include "DomainDecomposition.hpp" #include "ParticleDecomposition.hpp" +#include "RegularDecomposition.hpp" #include #include @@ -70,7 +70,7 @@ std::vector> get_pairs_filtered(double const distance, Particle const &p2, Distance const &d) { if (d.dist2 < cutoff2 and filter(p1) and filter(p2)) - ret.emplace_back(p1.p.identity, p2.p.identity); + ret.emplace_back(p1.id(), p2.id()); }; cell_structure.non_bonded_loop(pair_kernel); @@ -102,7 +102,7 @@ std::vector non_bonded_loop_trace() { std::vector ret; auto pair_kernel = [&ret](Particle const &p1, Particle const &p2, Distance const &d) { - ret.emplace_back(p1.p.identity, p2.p.identity, p1.r.p, p2.r.p, d.vec21, + ret.emplace_back(p1.id(), p2.id(), p1.pos(), p2.pos(), d.vec21, comm_cart.rank()); }; @@ -124,7 +124,7 @@ static auto mpi_get_pairs_of_types_local(double const distance, std::vector const &types) { auto pairs = get_pairs_filtered(distance, [types](Particle const &p) { return std::any_of(types.begin(), types.end(), - [p](int const type) { return p.p.type == type; }); + [p](int const type) { return p.type() == type; }); }); Utils::Mpi::gather_buffer(pairs, comm_cart); return pairs; @@ -195,14 +195,14 @@ std::vector mpi_resort_particles(int global_flag) { return n_parts; } -void cells_re_init(int new_cs) { +void cells_re_init(CellStructureType new_cs) { switch (new_cs) { - case CELL_STRUCTURE_DOMDEC: - cell_structure.set_domain_decomposition(comm_cart, interaction_range(), - box_geo, local_geo); + case CellStructureType::CELL_STRUCTURE_REGULAR: + cell_structure.set_regular_decomposition(comm_cart, interaction_range(), + box_geo, local_geo); break; - case CELL_STRUCTURE_NSQUARE: - cell_structure.set_atom_decomposition(comm_cart, box_geo); + case CellStructureType::CELL_STRUCTURE_NSQUARE: + cell_structure.set_atom_decomposition(comm_cart, box_geo, local_geo); break; default: throw std::runtime_error("Unknown cell system type"); @@ -213,7 +213,9 @@ void cells_re_init(int new_cs) { REGISTER_CALLBACK(cells_re_init) -void mpi_bcast_cell_structure(int cs) { mpi_call_all(cells_re_init, cs); } +void mpi_bcast_cell_structure(CellStructureType cs) { + mpi_call_all(cells_re_init, cs); +} void check_resort_particles() { auto const level = 
(cell_structure.check_resort_required( @@ -246,7 +248,7 @@ void cells_update_ghosts(unsigned data_parts) { /* Add the ghost particles to the index if we don't already * have them. */ for (auto &part : cell_structure.ghost_particles()) { - if (cell_structure.get_local_particle(part.p.identity) == nullptr) { + if (cell_structure.get_local_particle(part.id()) == nullptr) { cell_structure.update_particle_index(part.identity(), &part); } } @@ -263,8 +265,8 @@ Cell *find_current_cell(const Particle &p) { return cell_structure.find_current_cell(p); } -const DomainDecomposition *get_domain_decomposition() { - return &dynamic_cast( +const RegularDecomposition *get_regular_decomposition() { + return &dynamic_cast( Utils::as_const(cell_structure).decomposition()); } diff --git a/src/core/cells.hpp b/src/core/cells.hpp index 5c2bdc638d9..7073069d010 100644 --- a/src/core/cells.hpp +++ b/src/core/cells.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _CELLS_H -#define _CELLS_H +#ifndef CORE_CELLS_HPP +#define CORE_CELLS_HPP /** \file * This file contains everything related to the global cell structure / cell * system. @@ -29,8 +29,8 @@ * (regardless if they reside on the same or different nodes) * interact with each other. The following cell systems are implemented: * - * - domain decomposition: The simulation box is divided spatially - * into cells (see \ref DomainDecomposition.hpp). This is suitable for + * - regular decomposition: The simulation box is divided spatially + * into cells (see \ref RegularDecomposition.hpp). This is suitable for * short range interactions. * - nsquare: The particles are distributed equally on all nodes * regardless their spatial position (see \ref AtomDecomposition.hpp). @@ -40,8 +40,9 @@ #include "Cell.hpp" #include "CellStructure.hpp" -#include "DomainDecomposition.hpp" +#include "CellStructureType.hpp" #include "Particle.hpp" +#include "RegularDecomposition.hpp" #include #include @@ -63,10 +64,10 @@ extern CellStructure cell_structure; /** Reinitialize the cell structures. * @param new_cs The new topology to use afterwards. */ -void cells_re_init(int new_cs); +void cells_re_init(CellStructureType new_cs); /** Change the cell structure on all nodes. */ -void mpi_bcast_cell_structure(int cs); +void mpi_bcast_cell_structure(CellStructureType cs); /** * @brief Set @ref CellStructure::use_verlet_list @@ -113,19 +114,19 @@ std::vector mpi_resort_particles(int global_flag); /** * @brief Find the cell in which a particle is stored. * - * Uses position_to_cell on p.r.p. If this is not on the node's domain, - * uses position at last Verlet list rebuild (p.l.p_old). + * Uses position_to_cell on p.pos(). If this is not on the node's domain, + * uses position at last Verlet list rebuild (p.p_old()). * * @return pointer to the cell or nullptr if the particle is not on the node */ Cell *find_current_cell(const Particle &p); /** - * @brief Return a pointer to the global DomainDecomposition. + * @brief Return a pointer to the global RegularDecomposition. * * @return Pointer to the decomposition if it is set, nullptr otherwise. 
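The get_pairs_filtered() kernel shown earlier in cells.cpp collects id pairs that are closer than a cutoff and whose members both pass a predicate; the brute-force stand-in below does the same over a plain vector, without the cell-structure loop or the minimum-image convention. PartStub and pairs_filtered() are illustrative only.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

struct PartStub {
  int id;
  int type;
  double pos[3];
};

// O(N^2) stand-in for the kernel-based pair search sketched in the diff.
std::vector<std::pair<int, int>>
pairs_filtered(std::vector<PartStub> const &parts, double distance,
               std::function<bool(PartStub const &)> const &filter) {
  auto const cutoff2 = distance * distance;
  std::vector<std::pair<int, int>> ret;
  for (std::size_t i = 0; i < parts.size(); ++i)
    for (std::size_t j = i + 1; j < parts.size(); ++j) {
      double dist2 = 0.;
      for (int k = 0; k < 3; ++k) {
        auto const d = parts[i].pos[k] - parts[j].pos[k];
        dist2 += d * d;
      }
      if (dist2 < cutoff2 and filter(parts[i]) and filter(parts[j]))
        ret.emplace_back(parts[i].id, parts[j].id);
    }
  return ret;
}

int main() {
  std::vector<PartStub> parts = {{0, 1, {0., 0., 0.}},
                                 {1, 1, {0.5, 0., 0.}},
                                 {2, 2, {0.6, 0., 0.}}};
  auto const pairs = pairs_filtered(
      parts, 1.0, [](PartStub const &p) { return p.type == 1; });
  for (auto const &p : pairs)
    std::printf("pair (%d, %d)\n", p.first, p.second);
}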
*/ -const DomainDecomposition *get_domain_decomposition(); +const RegularDecomposition *get_regular_decomposition(); class PairInfo { public: @@ -133,7 +134,7 @@ class PairInfo { PairInfo(int _id1, int _id2, Utils::Vector3d _pos1, Utils::Vector3d _pos2, Utils::Vector3d _vec21, int _node) : id1(_id1), id2(_id2), pos1(_pos1), pos2(_pos2), vec21(_vec21), - node(_node){}; + node(_node) {} int id1; int id2; Utils::Vector3d pos1; diff --git a/src/core/cluster_analysis/Cluster.cpp b/src/core/cluster_analysis/Cluster.cpp index cfdae59aeef..0841aaccefd 100644 --- a/src/core/cluster_analysis/Cluster.cpp +++ b/src/core/cluster_analysis/Cluster.cpp @@ -57,15 +57,15 @@ Cluster::center_of_mass_subcluster(std::vector const &particle_ids) { // of the cluster is arbitrarily chosen as reference. auto const reference_position = - folded_position(get_particle_data(particles[0]).r.p, box_geo); + folded_position(get_particle_data(particles[0]).pos(), box_geo); double total_mass = 0.; for (int pid : particle_ids) { auto const folded_pos = - folded_position(get_particle_data(pid).r.p, box_geo); + folded_position(get_particle_data(pid).pos(), box_geo); auto const dist_to_reference = box_geo.get_mi_vector(folded_pos, reference_position); - com += dist_to_reference * get_particle_data(pid).p.mass; - total_mass += get_particle_data(pid).p.mass; + com += dist_to_reference * get_particle_data(pid).mass(); + total_mass += get_particle_data(pid).mass(); } // Normalize by number of particles @@ -83,8 +83,8 @@ double Cluster::longest_distance() { for (auto a = particles.begin(); a != particles.end(); a++) { for (auto b = a; ++b != particles.end();) { auto const dist = box_geo - .get_mi_vector(get_particle_data(*a).r.p, - get_particle_data(*b).r.p) + .get_mi_vector(get_particle_data(*a).pos(), + get_particle_data(*b).pos()) .norm(); // Larger than previous largest distance? @@ -107,7 +107,7 @@ Cluster::radius_of_gyration_subcluster(std::vector const &particle_ids) { for (auto const pid : particle_ids) { // calculate square length of this distance sum_sq_dist += - box_geo.get_mi_vector(com, get_particle_data(pid).r.p).norm2(); + box_geo.get_mi_vector(com, get_particle_data(pid).pos()).norm2(); } return sqrt(sum_sq_dist / static_cast(particle_ids.size())); @@ -137,7 +137,7 @@ std::pair Cluster::fractal_dimension(double dr) { std::vector distances; for (auto const &it : particles) { - distances.push_back(box_geo.get_mi_vector(com, get_particle_data(it).r.p) + distances.push_back(box_geo.get_mi_vector(com, get_particle_data(it).pos()) .norm()); // add distance from the current particle // to the com in the distances vectors } diff --git a/src/core/collision.cpp b/src/core/collision.cpp index 9243b401ce2..f93dccb9b4b 100644 --- a/src/core/collision.cpp +++ b/src/core/collision.cpp @@ -268,23 +268,23 @@ const Particle &glue_to_surface_calc_vs_pos(const Particle &p1, const Particle &p2, Utils::Vector3d &pos) { double c; - auto const vec21 = box_geo.get_mi_vector(p1.r.p, p2.r.p); + auto const vec21 = box_geo.get_mi_vector(p1.pos(), p2.pos()); const double dist_betw_part = vec21.norm(); // Find out, which is the particle to be glued. 
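// Editor's note on glue_to_surface_calc_vs_pos(): with vec21 = p1 - p2 and c
// chosen as in the branches below, the virtual site lands on the line
// connecting the two centers, at distance collision_params.dist_glued_part_to_vs
// from the particle being glued, measured towards its partner. A minimal
// sketch of that geometry, ignoring the minimum-image convention;
// vs_position() and the raw double[3] arithmetic are illustrative only and
// not part of the code base.
#include <cmath>
void vs_position(const double glued[3], const double partner[3],
                 double dist_to_vs, double pos[3]) {
  double dist = 0.;
  for (int k = 0; k < 3; ++k)
    dist += (partner[k] - glued[k]) * (partner[k] - glued[k]);
  dist = std::sqrt(dist);
  // walk dist_to_vs away from the glued particle towards its partner
  for (int k = 0; k < 3; ++k)
    pos[k] = glued[k] + (partner[k] - glued[k]) * (dist_to_vs / dist);
}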
- if ((p1.p.type == collision_params.part_type_to_be_glued) && - (p2.p.type == collision_params.part_type_to_attach_vs_to)) { + if ((p1.type() == collision_params.part_type_to_be_glued) && + (p2.type() == collision_params.part_type_to_attach_vs_to)) { c = 1 - collision_params.dist_glued_part_to_vs / dist_betw_part; - } else if ((p2.p.type == collision_params.part_type_to_be_glued) && - (p1.p.type == collision_params.part_type_to_attach_vs_to)) { + } else if ((p2.type() == collision_params.part_type_to_be_glued) && + (p1.type() == collision_params.part_type_to_attach_vs_to)) { c = collision_params.dist_glued_part_to_vs / dist_betw_part; } else { throw std::runtime_error("This should never be thrown. Bug."); } for (int i = 0; i < 3; i++) { - pos[i] = p2.r.p[i] + vec21[i] * c; + pos[i] = p2.pos()[i] + vec21[i] * c; } - if (p1.p.type == collision_params.part_type_to_attach_vs_to) + if (p1.type() == collision_params.part_type_to_attach_vs_to) return p1; return p2; @@ -294,9 +294,9 @@ void bind_at_point_of_collision_calc_vs_pos(const Particle *const p1, const Particle *const p2, Utils::Vector3d &pos1, Utils::Vector3d &pos2) { - auto const vec21 = box_geo.get_mi_vector(p1->r.p, p2->r.p); - pos1 = p1->r.p - vec21 * collision_params.vs_placement; - pos2 = p1->r.p - vec21 * (1. - collision_params.vs_placement); + auto const vec21 = box_geo.get_mi_vector(p1->pos(), p2->pos()); + pos1 = p1->pos() - vec21 * collision_params.vs_placement; + pos2 = p1->pos() - vec21 * (1. - collision_params.vs_placement); } // Considers three particles for three_particle_binding and performs @@ -305,10 +305,12 @@ void coldet_do_three_particle_bond(Particle &p, Particle const &p1, Particle const &p2) { // If p1 and p2 are not closer or equal to the cutoff distance, skip // p1: - if (box_geo.get_mi_vector(p.r.p, p1.r.p).norm() > collision_params.distance) + if (box_geo.get_mi_vector(p.pos(), p1.pos()).norm() > + collision_params.distance) return; // p2: - if (box_geo.get_mi_vector(p.r.p, p2.r.p).norm() > collision_params.distance) + if (box_geo.get_mi_vector(p.pos(), p2.pos()).norm() > + collision_params.distance) return; // Check, if there already is a three-particle bond centered on p @@ -341,9 +343,9 @@ void coldet_do_three_particle_bond(Particle &p, Particle const &p1, // First, find the angle between the particle p, p1 and p2 /* vector from p to p1 */ - auto const vec1 = box_geo.get_mi_vector(p.r.p, p1.r.p).normalize(); + auto const vec1 = box_geo.get_mi_vector(p.pos(), p1.pos()).normalize(); /* vector from p to p2 */ - auto const vec2 = box_geo.get_mi_vector(p.r.p, p2.r.p).normalize(); + auto const vec2 = box_geo.get_mi_vector(p.pos(), p2.pos()).normalize(); auto const cosine = boost::algorithm::clamp(vec1 * vec2, -TINY_COS_VALUE, TINY_COS_VALUE); @@ -360,7 +362,7 @@ void coldet_do_three_particle_bond(Particle &p, Particle const &p1, collision_params.bond_three_particles); // Create the bond - const std::array bondT = {p1.p.identity, p2.p.identity}; + const std::array bondT = {{p1.id(), p2.id()}}; p.bonds().insert({bond_id, bondT}); } @@ -369,14 +371,14 @@ void place_vs_and_relate_to_particle(const int current_vs_pid, const Utils::Vector3d &pos, int relate_to) { Particle new_part; - new_part.p.identity = current_vs_pid; - new_part.r.p = pos; + new_part.id() = current_vs_pid; + new_part.pos() = pos; auto p_vs = cell_structure.add_particle(std::move(new_part)); local_vs_relate_to(*p_vs, get_part(relate_to)); p_vs->p.is_virtual = true; - p_vs->p.type = collision_params.vs_particle_type; + p_vs->type() = 
collision_params.vs_particle_type; } void bind_at_poc_create_bond_between_vs(const int current_vs_pid, @@ -412,14 +414,14 @@ void bind_at_poc_create_bond_between_vs(const int current_vs_pid, void glue_to_surface_bind_part_to_vs(const Particle *const p1, const Particle *const p2, const int vs_pid_plus_one, - const CollisionPair &c) { + const CollisionPair &) { // Create bond between the virtual particles const int bondG[] = {vs_pid_plus_one - 1}; - if (p1->p.type == collision_params.part_type_after_glueing) { - get_part(p1->p.identity).bonds().insert({collision_params.bond_vs, bondG}); + if (p1->type() == collision_params.part_type_after_glueing) { + get_part(p1->id()).bonds().insert({collision_params.bond_vs, bondG}); } else { - get_part(p2->p.identity).bonds().insert({collision_params.bond_vs, bondG}); + get_part(p2->id()).bonds().insert({collision_params.bond_vs, bondG}); } } @@ -438,7 +440,7 @@ static void three_particle_binding_do_search(Cell *basecell, Particle &p1, auto handle_cell = [&p1, &p2](Cell *c) { for (auto &P : c->particles()) { // Skip collided particles themselves - if ((P.p.identity == p1.p.identity) || (P.p.identity == p2.p.identity)) { + if ((P.id() == p1.id()) || (P.id() == p2.id())) { continue; } @@ -447,15 +449,15 @@ static void three_particle_binding_do_search(Cell *basecell, Particle &p1, // non-cyclic permutations). // coldet_do_three_particle_bond checks the bonding criterion and if // the involved particles are not already bonded before it binds them. - if (!P.l.ghost) { + if (!P.is_ghost()) { coldet_do_three_particle_bond(P, p1, p2); } - if (!p1.l.ghost) { + if (!p1.is_ghost()) { coldet_do_three_particle_bond(p1, P, p2); } - if (!p2.l.ghost) { + if (!p2.is_ghost()) { coldet_do_three_particle_bond(p2, P, p1); } } @@ -513,7 +515,7 @@ void handle_collisions() { if (bind_centers()) { for (auto &c : local_collision_queue) { // put the bond to the non-ghost particle; at least one partner always is - if (cell_structure.get_local_particle(c.pp1)->l.ghost) { + if (cell_structure.get_local_particle(c.pp1)->is_ghost()) { std::swap(c.pp1, c.pp2); } @@ -554,7 +556,7 @@ void handle_collisions() { // or one is ghost and one is not accessible // we only increase the counter for the ext id to use based on the // number of particles created by other nodes - if (((!p1 or p1->l.ghost) and (!p2 or p2->l.ghost)) or !p1 or !p2) { + if (((!p1 or p1->is_ghost()) and (!p2 or p2->is_ghost())) or !p1 or !p2) { // Increase local counters if (collision_params.mode & COLLISION_MODE_VS) { current_vs_pid++; @@ -563,12 +565,12 @@ void handle_collisions() { current_vs_pid++; if (collision_params.mode == COLLISION_MODE_GLUE_TO_SURF) { if (p1) - if (p1->p.type == collision_params.part_type_to_be_glued) { - p1->p.type = collision_params.part_type_after_glueing; + if (p1->type() == collision_params.part_type_to_be_glued) { + p1->type() = collision_params.part_type_after_glueing; } if (p2) - if (p2->p.type == collision_params.part_type_to_be_glued) { - p2->p.type = collision_params.part_type_after_glueing; + if (p2->type() == collision_params.part_type_to_be_glued) { + p2->type() = collision_params.part_type_after_glueing; } } // mode glue to surface @@ -580,14 +582,14 @@ void handle_collisions() { Utils::Vector3d pos1, pos2; // Enable rotation on the particles to which vs will be attached - p1->p.rotation = ROTATION_X | ROTATION_Y | ROTATION_Z; - p2->p.rotation = ROTATION_X | ROTATION_Y | ROTATION_Z; + p1->set_can_rotate_all_axes(); + p2->set_can_rotate_all_axes(); // Positions of the virtual sites 
bind_at_point_of_collision_calc_vs_pos(p1, p2, pos1, pos2); auto handle_particle = [&](Particle *p, Utils::Vector3d const &pos) { - if (not p->l.ghost) { + if (not p->is_ghost()) { place_vs_and_relate_to_particle(current_vs_pid, pos, p->identity()); // Particle storage locations may have changed due to @@ -618,8 +620,8 @@ void handle_collisions() { // can not always know whether or not a vs is placed if (collision_params.part_type_after_glueing != collision_params.part_type_to_be_glued) { - if ((p1->p.type == collision_params.part_type_after_glueing) || - (p2->p.type == collision_params.part_type_after_glueing)) { + if ((p1->type() == collision_params.part_type_after_glueing) || + (p2->type() == collision_params.part_type_after_glueing)) { current_vs_pid++; continue; } @@ -631,22 +633,22 @@ void handle_collisions() { // Add a bond between the centers of the colliding particles // The bond is placed on the node that has p1 - if (!p1->l.ghost) { + if (!p1->is_ghost()) { const int bondG[] = {c.pp2}; get_part(c.pp1).bonds().insert( {collision_params.bond_centers, bondG}); } // Change type of particle being attached, to make it inert - if (p1->p.type == collision_params.part_type_to_be_glued) { - p1->p.type = collision_params.part_type_after_glueing; + if (p1->type() == collision_params.part_type_to_be_glued) { + p1->type() = collision_params.part_type_after_glueing; } - if (p2->p.type == collision_params.part_type_to_be_glued) { - p2->p.type = collision_params.part_type_after_glueing; + if (p2->type() == collision_params.part_type_to_be_glued) { + p2->type() = collision_params.part_type_after_glueing; } // Vs placement happens on the node that has p1 - if (!attach_vs_to.l.ghost) { + if (!attach_vs_to.is_ghost()) { place_vs_and_relate_to_particle(current_vs_pid, pos, attach_vs_to.identity()); // Particle storage locations may have changed due to diff --git a/src/core/collision.hpp b/src/core/collision.hpp index 8606c337e68..e90fc460335 100644 --- a/src/core/collision.hpp +++ b/src/core/collision.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _COLLISION_H -#define _COLLISION_H +#ifndef CORE_COLLISION_HPP +#define CORE_COLLISION_HPP #include "config.hpp" @@ -48,7 +48,7 @@ class Collision_parameters { public: Collision_parameters() : mode(COLLISION_MODE_OFF), distance(0.), distance2(0.), bond_centers(-1), - bond_vs(-1), bond_three_particles(-1){}; + bond_vs(-1), bond_three_particles(-1) {} /// collision handling mode, a combination of constants COLLISION_MODE_* int mode; @@ -154,7 +154,7 @@ inline void detect_collision(Particle const &p1, Particle const &p2, queue_collision(p1.p.identity, p2.p.identity); } -#endif +#endif // COLLISION_DETECTION inline double collision_detection_cutoff() { #ifdef COLLISION_DETECTION diff --git a/src/core/communication.hpp b/src/core/communication.hpp index 724f2e37dfa..c7562703965 100644 --- a/src/core/communication.hpp +++ b/src/core/communication.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _COMMUNICATION_HPP -#define _COMMUNICATION_HPP +#ifndef CORE_COMMUNICATION_HPP +#define CORE_COMMUNICATION_HPP /** \file * This file contains the asynchronous MPI communication. * @@ -58,13 +58,6 @@ extern int n_nodes; /** The communicator */ extern boost::mpi::communicator comm_cart; -/** - * Default MPI tag used by callbacks. 
- */ -#ifndef SOME_TAG -#define SOME_TAG 42 -#endif - namespace Communication { /** * @brief Returns a reference to the global callback class instance. diff --git a/src/core/constraints/Constraint.hpp b/src/core/constraints/Constraint.hpp index fcd0faa895c..d25bfd3453c 100644 --- a/src/core/constraints/Constraint.hpp +++ b/src/core/constraints/Constraint.hpp @@ -54,7 +54,7 @@ class Constraint { */ virtual bool fits_in_box(Utils::Vector3d const &box) const = 0; - virtual void reset_force(){}; + virtual void reset_force() {} virtual ~Constraint() = default; }; diff --git a/src/core/constraints/Constraints.hpp b/src/core/constraints/Constraints.hpp index a5179f11bed..e69f8599208 100644 --- a/src/core/constraints/Constraints.hpp +++ b/src/core/constraints/Constraints.hpp @@ -81,7 +81,7 @@ template class Constraints { reset_forces(); for (auto &p : particles) { - auto const pos = folded_position(p.r.p, box_geo); + auto const pos = folded_position(p.pos(), box_geo); ParticleForce force{}; for (auto const &c : *this) { force += c->force(p, pos, t); @@ -94,7 +94,7 @@ template class Constraints { void add_energy(const ParticleRange &particles, double time, Observable_stat &obs_energy) const { for (auto &p : particles) { - auto const pos = folded_position(p.r.p, box_geo); + auto const pos = folded_position(p.pos(), box_geo); for (auto const &constraint : *this) { constraint->add_energy(p, pos, time, obs_energy); diff --git a/src/core/constraints/HomogeneousMagneticField.hpp b/src/core/constraints/HomogeneousMagneticField.hpp index 6f34ad40f24..1f92552b159 100644 --- a/src/core/constraints/HomogeneousMagneticField.hpp +++ b/src/core/constraints/HomogeneousMagneticField.hpp @@ -41,7 +41,7 @@ class HomogeneousMagneticField : public Constraint { ParticleForce force(const Particle &p, const Utils::Vector3d &, double) override; - bool fits_in_box(Utils::Vector3d const &box) const override { return true; } + bool fits_in_box(Utils::Vector3d const &) const override { return true; } private: Utils::Vector3d m_field; diff --git a/src/core/constraints/ShapeBasedConstraint.cpp b/src/core/constraints/ShapeBasedConstraint.cpp index a6ff9ccb553..c3e0e92c0eb 100644 --- a/src/core/constraints/ShapeBasedConstraint.cpp +++ b/src/core/constraints/ShapeBasedConstraint.cpp @@ -57,11 +57,11 @@ double ShapeBasedConstraint::min_dist(const ParticleRange &particles) { std::numeric_limits::infinity(), [this](double min, Particle const &p) { IA_parameters const &ia_params = - *get_ia_param(p.p.type, part_rep.p.type); + *get_ia_param(p.type(), part_rep.type()); if (checkIfInteraction(ia_params)) { double dist; Utils::Vector3d vec; - m_shape->calculate_dist(folded_position(p.r.p, box_geo), dist, vec); + m_shape->calculate_dist(folded_position(p.pos(), box_geo), dist, vec); return std::min(min, dist); } return min; @@ -73,9 +73,9 @@ double ShapeBasedConstraint::min_dist(const ParticleRange &particles) { ParticleForce ShapeBasedConstraint::force(Particle const &p, Utils::Vector3d const &folded_pos, - double t) { + double) { ParticleForce pf{}; - IA_parameters const &ia_params = *get_ia_param(p.p.type, part_rep.p.type); + IA_parameters const &ia_params = *get_ia_param(p.type(), part_rep.type()); if (checkIfInteraction(ia_params)) { double dist = 0.; @@ -112,12 +112,12 @@ ParticleForce ShapeBasedConstraint::force(Particle const &p, #endif } } else { - runtimeErrorMsg() << "Constraint violated by particle " << p.p.identity + runtimeErrorMsg() << "Constraint violated by particle " << p.id() << " dist " << dist; } #ifdef ROTATION - 
part_rep.f.torque += calc_opposing_force(pf, dist_vec).torque; + part_rep.torque() += calc_opposing_force(pf, dist_vec).torque; #endif #ifdef DPD pf.f += dpd_force; @@ -133,7 +133,7 @@ void ShapeBasedConstraint::add_energy(const Particle &p, Observable_stat &obs_energy) const { double energy = 0.0; - IA_parameters const &ia_params = *get_ia_param(p.p.type, part_rep.p.type); + IA_parameters const &ia_params = *get_ia_param(p.type(), part_rep.type()); if (checkIfInteraction(ia_params)) { double dist = 0.0; @@ -147,10 +147,10 @@ void ShapeBasedConstraint::add_energy(const Particle &p, -1.0 * dist); } } else { - runtimeErrorMsg() << "Constraint violated by particle " << p.p.identity; + runtimeErrorMsg() << "Constraint violated by particle " << p.id(); } } - if (part_rep.p.type >= 0) - obs_energy.add_non_bonded_contribution(p.p.type, part_rep.p.type, energy); + if (part_rep.type() >= 0) + obs_energy.add_non_bonded_contribution(p.type(), part_rep.type(), energy); } } // namespace Constraints diff --git a/src/core/cuda_init.hpp b/src/core/cuda_init.hpp index b81d54dc551..955d4303b82 100644 --- a/src/core/cuda_init.hpp +++ b/src/core/cuda_init.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _CUDA_INIT_H -#define _CUDA_INIT_H +#ifndef CORE_CUDA_INIT_H +#define CORE_CUDA_INIT_H #include "config.hpp" diff --git a/src/core/cuda_interface.cpp b/src/core/cuda_interface.cpp index 26971b8f4eb..b04782c4a6f 100644 --- a/src/core/cuda_interface.cpp +++ b/src/core/cuda_interface.cpp @@ -40,12 +40,12 @@ static void pack_particles(ParticleRange particles, int i = 0; for (auto const &part : particles) { - buffer[i].p = static_cast(folded_position(part.r.p, box_geo)); + buffer[i].p = static_cast(folded_position(part.pos(), box_geo)); - buffer[i].identity = part.p.identity; + buffer[i].identity = part.id(); buffer[i].v = static_cast(part.m.v); #ifdef VIRTUAL_SITES - buffer[i].is_virtual = part.p.is_virtual; + buffer[i].is_virtual = part.is_virtual(); #endif #ifdef DIPOLES @@ -53,30 +53,30 @@ static void pack_particles(ParticleRange particles, #endif #ifdef LB_ELECTROHYDRODYNAMICS - buffer[i].mu_E = static_cast(part.p.mu_E); + buffer[i].mu_E = static_cast(part.mu_E()); #endif #ifdef ELECTROSTATICS - buffer[i].q = static_cast(part.p.q); + buffer[i].q = static_cast(part.q()); #endif #ifdef MASS - buffer[i].mass = static_cast(part.p.mass); + buffer[i].mass = static_cast(part.mass()); #endif #ifdef ROTATION - buffer[i].director = static_cast(part.r.calc_director()); + buffer[i].director = static_cast(part.calc_director()); #endif #ifdef ENGINE - buffer[i].swim.v_swim = static_cast(part.p.swim.v_swim); - buffer[i].swim.f_swim = static_cast(part.p.swim.f_swim); + buffer[i].swim.v_swim = static_cast(part.swimming().v_swim); + buffer[i].swim.f_swim = static_cast(part.swimming().f_swim); buffer[i].swim.director = buffer[i].director; - buffer[i].swim.push_pull = part.p.swim.push_pull; + buffer[i].swim.push_pull = part.swimming().push_pull; buffer[i].swim.dipole_length = - static_cast(part.p.swim.dipole_length); - buffer[i].swim.swimming = part.p.swim.swimming; + static_cast(part.swimming().dipole_length); + buffer[i].swim.swimming = part.swimming().swimming; #endif i++; } @@ -120,7 +120,7 @@ static void add_forces_and_torques(ParticleRange particles, for (int j = 0; j < 3; j++) { part.f.f[j] += forces[3 * i + j]; #ifdef ROTATION - part.f.torque[j] += torques[3 * i + j]; + part.torque()[j] += torques[3 * i + j]; #endif } i++; diff 
--git a/src/core/cuda_utils.cuh b/src/core/cuda_utils.cuh index 5eec836394b..5b80e462417 100644 --- a/src/core/cuda_utils.cuh +++ b/src/core/cuda_utils.cuh @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _CUDA_UTILS_CUH -#define _CUDA_UTILS_CUH +#ifndef CORE_CUDA_UTILS_CUH +#define CORE_CUDA_UTILS_CUH #if !defined(__CUDACC__) #error Do not include CUDA headers in normal C++-code!!! diff --git a/src/core/cuda_utils.hpp b/src/core/cuda_utils.hpp index faf49dbb472..6c3e1aa0eef 100644 --- a/src/core/cuda_utils.hpp +++ b/src/core/cuda_utils.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _CUDA_UTILS_HPP -#define _CUDA_UTILS_HPP +#ifndef CORE_CUDA_UTILS_HPP +#define CORE_CUDA_UTILS_HPP #include "config.hpp" diff --git a/src/core/dpd.cpp b/src/core/dpd.cpp index d33a79b7399..5af8296fddd 100644 --- a/src/core/dpd.cpp +++ b/src/core/dpd.cpp @@ -58,7 +58,7 @@ using Utils::Vector3d; * 3. Two particle IDs (order-independent, decorrelates particles, gets rid of * seed-per-node) */ -Vector3d dpd_noise(uint32_t pid1, uint32_t pid2) { +Vector3d dpd_noise(int pid1, int pid2) { return Random::noise_uniform( dpd.rng_counter(), dpd.rng_seed(), (pid1 < pid2) ? pid2 : pid1, (pid1 < pid2) ? pid1 : pid2); diff --git a/src/core/electrostatics_magnetostatics/coulomb.cpp b/src/core/electrostatics_magnetostatics/coulomb.cpp index f2adfd61209..2e955867a6e 100644 --- a/src/core/electrostatics_magnetostatics/coulomb.cpp +++ b/src/core/electrostatics_magnetostatics/coulomb.cpp @@ -88,8 +88,7 @@ bool sanity_checks() { bool failed = false; switch (coulomb.method) { case COULOMB_MMM1D: - if (MMM1D_sanity_checks()) - failed = true; + failed |= MMM1D_sanity_checks(); break; #ifdef P3M case COULOMB_ELC_P3M: @@ -102,8 +101,7 @@ bool sanity_checks() { // fall through case COULOMB_P3M_GPU: case COULOMB_P3M: - if (p3m_sanity_checks()) - failed = true; + failed |= p3m_sanity_checks(); break; #endif default: diff --git a/src/core/electrostatics_magnetostatics/coulomb_inline.hpp b/src/core/electrostatics_magnetostatics/coulomb_inline.hpp index e3c912c94ae..d62e6e699ac 100644 --- a/src/core/electrostatics_magnetostatics/coulomb_inline.hpp +++ b/src/core/electrostatics_magnetostatics/coulomb_inline.hpp @@ -143,7 +143,7 @@ inline Utils::Matrix pair_pressure(Particle const &p1, // energy_inline inline double pair_energy(Particle const &p1, Particle const &p2, double const q1q2, Utils::Vector3d const &d, - double dist, double dist2) { + double dist) { /* real space Coulomb */ auto E = [&]() { switch (coulomb.method) { @@ -168,7 +168,7 @@ inline double pair_energy(Particle const &p1, Particle const &p2, case COULOMB_RF: return rf_coulomb_pair_energy(q1q2, dist); case COULOMB_MMM1D: - return mmm1d_coulomb_pair_energy(q1q2, d, dist2, dist); + return mmm1d_coulomb_pair_energy(q1q2, d, dist); default: return 0.; } diff --git a/src/core/electrostatics_magnetostatics/dipole.cpp b/src/core/electrostatics_magnetostatics/dipole.cpp index 4295ac62647..1e98a62146a 100644 --- a/src/core/electrostatics_magnetostatics/dipole.cpp +++ b/src/core/electrostatics_magnetostatics/dipole.cpp @@ -71,8 +71,7 @@ bool sanity_checks() { mdlc_sanity_checks(); // fall through case DIPOLAR_P3M: - if (dp3m_sanity_checks(node_grid)) - failed = true; + failed |= dp3m_sanity_checks(node_grid); break; case DIPOLAR_MDLC_DS: mdlc_sanity_checks(); diff --git 
a/src/core/electrostatics_magnetostatics/dp3m_influence_function.hpp b/src/core/electrostatics_magnetostatics/dp3m_influence_function.hpp index 5d2000d8343..9407f27924c 100644 --- a/src/core/electrostatics_magnetostatics/dp3m_influence_function.hpp +++ b/src/core/electrostatics_magnetostatics/dp3m_influence_function.hpp @@ -123,10 +123,8 @@ std::vector grid_influence_function(P3MParameters const ¶ms, double fak1 = Utils::int_pow<3>(static_cast(params.mesh[0])) * 2.0 / Utils::sqr(box_l[0]); - auto const shifts = - detail::calc_meshift({params.mesh[0], params.mesh[1], params.mesh[2]}); - auto const d_ops = detail::calc_meshift( - {params.mesh[0], params.mesh[1], params.mesh[2]}, true); + auto const shifts = detail::calc_meshift(params.mesh, false); + auto const d_ops = detail::calc_meshift(params.mesh, true); Utils::Vector3i n{}; for (n[0] = n_start[0]; n[0] < n_end[0]; n[0]++) { @@ -192,10 +190,8 @@ double grid_influence_function_self_energy(P3MParameters const ¶ms, std::vector const &g) { auto const size = n_end - n_start; - auto const shifts = - detail::calc_meshift({params.mesh[0], params.mesh[1], params.mesh[2]}); - auto const d_ops = detail::calc_meshift( - {params.mesh[0], params.mesh[1], params.mesh[2]}, true); + auto const shifts = detail::calc_meshift(params.mesh, false); + auto const d_ops = detail::calc_meshift(params.mesh, true); double energy = 0.0; Utils::Vector3i n{}; diff --git a/src/core/electrostatics_magnetostatics/elc.cpp b/src/core/electrostatics_magnetostatics/elc.cpp index 6c60f0f5fab..36b2cd0cd9d 100644 --- a/src/core/electrostatics_magnetostatics/elc.cpp +++ b/src/core/electrostatics_magnetostatics/elc.cpp @@ -186,12 +186,10 @@ void distribute(std::size_t size) { */ inline void check_gap_elc(const Particle &p) { if (p.p.q != 0) { - if (p.r.p[2] < 0) + auto const z = p.r.p[2]; + if (z < 0. or z > elc_params.h) { runtimeErrorMsg() << "Particle " << p.p.identity << " entered ELC gap " - << "region by " << (p.r.p[2]); - else if (p.r.p[2] > elc_params.h) { - runtimeErrorMsg() << "Particle " << p.p.identity << " entered ELC gap " - << "region by " << (p.r.p[2] - elc_params.h); + << "region by " << ((z < 0.) ? z : z - elc_params.h); } } } @@ -526,7 +524,7 @@ void setup_PoQ(std::size_t index, double omega, clear_vec(lclimge, size); clear_vec(gblcblk, size); - auto &sc_cache = (axis == PoQ::P) ? scxcache : scycache; + auto const &sc_cache = (axis == PoQ::P) ? scxcache : scycache; std::size_t ic = 0; auto const o = (index - 1) * particles.size(); diff --git a/src/core/electrostatics_magnetostatics/elc.hpp b/src/core/electrostatics_magnetostatics/elc.hpp index f710f1d7d1a..8079a6accf3 100644 --- a/src/core/electrostatics_magnetostatics/elc.hpp +++ b/src/core/electrostatics_magnetostatics/elc.hpp @@ -26,8 +26,8 @@ * see MMM in general. The ELC method works together with any three-dimensional * method, for example \ref p3m.hpp "P3M", with metallic boundary conditions. */ -#ifndef _ELC_H -#define _ELC_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_ELC_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_ELC_HPP #include "config.hpp" diff --git a/src/core/electrostatics_magnetostatics/fft.cpp b/src/core/electrostatics_magnetostatics/fft.cpp index f7e318e2032..9d386502113 100644 --- a/src/core/electrostatics_magnetostatics/fft.cpp +++ b/src/core/electrostatics_magnetostatics/fft.cpp @@ -63,7 +63,7 @@ using Utils::permute_ifield; namespace { /** This ugly function does the bookkeeping: which nodes have to * communicate to each other, when you change the node grid. 
- * Changing the domain decomposition requires communication. This + * Changing the regular decomposition requires communication. This * function finds (hopefully) the best way to do this. As input it * needs the two grids (@p grid1, @p grid2) and a linear list (@p node_list1) * with the node identities for @p grid1. The linear list (@p node_list2) @@ -415,21 +415,18 @@ void back_grid_comm(fft_forw_plan plan_f, fft_back_plan plan_b, } /** Calculate 'best' mapping between a 2D and 3D grid. - * Required for the communication from 3D domain decomposition - * to 2D row decomposition. + * Required for the communication from 3D regular domain + * decomposition to 2D regular row decomposition. * The dimensions of the 2D grid are resorted, if necessary, in a way * that they are multiples of the 3D grid dimensions. * \param g3d 3D grid. * \param g2d 2D grid. - * \param mult factors between 3D and 2D grid dimensions * \return index of the row direction [0,1,2]. */ -int map_3don2d_grid(int const g3d[3], int g2d[3], int mult[3]) { +int map_3don2d_grid(int const g3d[3], int g2d[3]) { int row_dir = -1; /* trivial case */ if (g3d[2] == 1) { - for (int i = 0; i < 3; i++) - mult[i] = 1; return 2; } if (g2d[0] % g3d[0] == 0) { @@ -464,13 +461,14 @@ int map_3don2d_grid(int const g3d[3], int g2d[3], int mult[3]) { g2d[0] = 1; } } - for (int i = 0; i < 3; i++) - mult[i] = g2d[i] / g3d[i]; return row_dir; } /** Calculate most square 2D grid. */ void calc_2d_grid(int n, int grid[3]) { + grid[0] = n; + grid[1] = 1; + grid[2] = 1; for (auto i = static_cast(std::sqrt(n)); i >= 1; i--) { if (n % i == 0) { grid[0] = n / i; @@ -486,9 +484,6 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, int const *global_mesh_dim, double const *global_mesh_off, int &ks_pnum, fft_data_struct &fft, const Utils::Vector3i &grid, const boost::mpi::communicator &comm) { - int i, j; - /* helpers */ - int mult[3]; int n_grid[4][3]; /* The four node grids. */ int my_pos[4][3]; /* The position of comm.rank() in the node grids. 
*/ @@ -500,18 +495,18 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, fft.max_comm_size = 0; fft.max_mesh_size = 0; - for (i = 0; i < 4; i++) { + for (int i = 0; i < 4; i++) { n_id[i].resize(1 * comm.size()); n_pos[i].resize(3 * comm.size()); } /* === node grids === */ /* real space node grid (n_grid[0]) */ - for (i = 0; i < 3; i++) { + for (int i = 0; i < 3; i++) { n_grid[0][i] = grid[i]; my_pos[0][i] = node_pos[i]; } - for (i = 0; i < comm.size(); i++) { + for (int i = 0; i < comm.size(); i++) { MPI_Cart_coords(comm, i, 3, &(n_pos[0][3 * i + 0])); auto const lin_ind = get_linear_index( n_pos[0][3 * i + 0], n_pos[0][3 * i + 1], n_pos[0][3 * i + 2], @@ -522,11 +517,11 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, /* FFT node grids (n_grid[1 - 3]) */ calc_2d_grid(comm.size(), n_grid[1]); /* resort n_grid[1] dimensions if necessary */ - fft.plan[1].row_dir = map_3don2d_grid(n_grid[0], n_grid[1], mult); + fft.plan[1].row_dir = map_3don2d_grid(n_grid[0], n_grid[1]); fft.plan[0].n_permute = 0; - for (i = 1; i < 4; i++) + for (int i = 1; i < 4; i++) fft.plan[i].n_permute = (fft.plan[1].row_dir + i) % 3; - for (i = 0; i < 3; i++) { + for (int i = 0; i < 3; i++) { n_grid[2][i] = n_grid[1][(i + 1) % 3]; n_grid[3][i] = n_grid[1][(i + 2) % 3]; } @@ -535,10 +530,10 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, /* === communication groups === */ /* copy local mesh off real space charge assignment grid */ - for (i = 0; i < 3; i++) + for (int i = 0; i < 3; i++) fft.plan[0].new_mesh[i] = ca_mesh_dim[i]; - for (i = 1; i < 4; i++) { + for (int i = 1; i < 4; i++) { using Utils::make_span; auto group = find_comm_groups( {n_grid[i - 1][0], n_grid[i - 1][1], n_grid[i - 1][2]}, @@ -574,7 +569,7 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, fft.plan[i].n_ffts = fft.plan[i].new_mesh[0] * fft.plan[i].new_mesh[1]; /* === send/recv block specifications === */ - for (j = 0; j < fft.plan[i].group.size(); j++) { + for (int j = 0; j < fft.plan[i].group.size(); j++) { /* send block: comm.rank() to comm-group-node i (identity: node) */ int node = fft.plan[i].group[j]; fft.plan[i].send_size[j] = calc_send_block( @@ -605,13 +600,13 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, fft.max_comm_size = fft.plan[i].recv_size[j]; } - for (j = 0; j < 3; j++) + for (int j = 0; j < 3; j++) fft.plan[i].old_mesh[j] = fft.plan[i - 1].new_mesh[j]; - if (i == 1) + if (i == 1) { fft.plan[i].element = 1; - else { + } else { fft.plan[i].element = 2; - for (j = 0; j < fft.plan[i].group.size(); j++) { + for (int j = 0; j < fft.plan[i].group.size(); j++) { fft.plan[i].send_size[j] *= 2; fft.plan[i].recv_size[j] *= 2; } @@ -621,12 +616,12 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, /* Factor 2 for complex fields */ fft.max_comm_size *= 2; fft.max_mesh_size = Utils::product(ca_mesh_dim); - for (i = 1; i < 4; i++) + for (int i = 1; i < 4; i++) if (2 * fft.plan[i].new_size > fft.max_mesh_size) fft.max_mesh_size = 2 * fft.plan[i].new_size; /* === pack function === */ - for (i = 1; i < 4; i++) { + for (int i = 1; i < 4; i++) { fft.plan[i].pack_function = pack_block_permute2; } ks_pnum = 6; @@ -644,7 +639,7 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, auto *c_data = (fftw_complex *)(fft.data_buf.data()); /* === FFT Routines (Using FFTW / RFFTW package)=== */ - for (i = 1; i < 4; i++) { + for (int i = 1; i < 4; i++) { 
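      // Each stage i performs fft.plan[i].n_ffts one-dimensional transforms
      // along a single mesh direction. For orientation, a minimal stand-alone
      // FFTW forward transform (hypothetical buffer `buf` of length N, not a
      // name used in this file) would read:
      //   fftw_complex *buf = fftw_alloc_complex(N);
      //   fftw_plan p = fftw_plan_dft_1d(N, buf, buf, FFTW_FORWARD, FFTW_ESTIMATE);
      //   fftw_execute(p);
      //   fftw_destroy_plan(p);
      //   fftw_free(buf);
      // The plans built below additionally carry the permutation and
      // send/recv bookkeeping prepared above.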
fft.plan[i].dir = FFTW_FORWARD; /* FFT plan creation.*/ @@ -658,7 +653,7 @@ int fft_init(const Utils::Vector3i &ca_mesh_dim, int const *ca_mesh_margin, /* === The BACK Direction === */ /* this is needed because slightly different functions are used */ - for (i = 1; i < 4; i++) { + for (int i = 1; i < 4; i++) { fft.back[i].dir = FFTW_BACKWARD; if (fft.init_tag) diff --git a/src/core/electrostatics_magnetostatics/fft.hpp b/src/core/electrostatics_magnetostatics/fft.hpp index cad4d7a634c..86d7cc7e1a5 100644 --- a/src/core/electrostatics_magnetostatics/fft.hpp +++ b/src/core/electrostatics_magnetostatics/fft.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _FFT_H -#define _FFT_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_FFT_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_FFT_HPP /** \file * * Routines, row decomposition, data structures and communication for the diff --git a/src/core/electrostatics_magnetostatics/icc.cpp b/src/core/electrostatics_magnetostatics/icc.cpp index ea2cbcabf0c..6685acb0e3b 100644 --- a/src/core/electrostatics_magnetostatics/icc.cpp +++ b/src/core/electrostatics_magnetostatics/icc.cpp @@ -45,9 +45,11 @@ #include #include -#include +#include +#include #include +#include #include #include #include @@ -70,8 +72,8 @@ void force_calc_icc(CellStructure &cell_structure, * contributions are calculated */ inline void add_non_bonded_pair_force_icc(Particle &p1, Particle &p2, - Utils::Vector3d const &d, double dist, - double dist2) { + Utils::Vector3d const &d, + double dist) { auto forces = Coulomb::pair_force(p1, p2, d, dist); p1.f.f += std::get<0>(forces); @@ -193,7 +195,7 @@ void force_calc_icc(CellStructure &cell_structure, // calc ICC forces cell_structure.non_bonded_loop( [](Particle &p1, Particle &p2, Distance const &d) { - add_non_bonded_pair_force_icc(p1, p2, d.vec21, sqrt(d.dist2), d.dist2); + add_non_bonded_pair_force_icc(p1, p2, d.vec21, sqrt(d.dist2)); }); Coulomb::calc_long_range_force(particles); @@ -220,9 +222,6 @@ void icc_set_params(int n_icc, double convergence, double relaxation, int first_id, double eps_out, std::vector &areas, std::vector &e_in, std::vector &sigma, std::vector &normals) { - if (n_icc < 0) - throw std::runtime_error("ICC: invalid number of particles. " + - std::to_string(n_icc)); if (convergence <= 0) throw std::runtime_error("ICC: invalid convergence value. " + std::to_string(convergence)); @@ -238,14 +237,12 @@ void icc_set_params(int n_icc, double convergence, double relaxation, if (eps_out <= 0) throw std::runtime_error("ICC: invalid eps_out. 
" + std::to_string(eps_out)); - if (areas.size() != n_icc) - throw std::runtime_error("ICC: invalid areas vector."); - if (e_in.size() != n_icc) - throw std::runtime_error("ICC: invalid e_in vector."); - if (sigma.size() != n_icc) - throw std::runtime_error("ICC: invalid sigma vector."); - if (normals.size() != n_icc) - throw std::runtime_error("ICC: invalid normals vector."); + + assert(n_icc >= 0); + assert(areas.size() == n_icc); + assert(e_in.size() == n_icc); + assert(sigma.size() == n_icc); + assert(normals.size() == n_icc); icc_cfg.n_icc = n_icc; icc_cfg.convergence = convergence; diff --git a/src/core/electrostatics_magnetostatics/mdlc_correction.cpp b/src/core/electrostatics_magnetostatics/mdlc_correction.cpp index bcefe278fdb..f280ef1aa4f 100644 --- a/src/core/electrostatics_magnetostatics/mdlc_correction.cpp +++ b/src/core/electrostatics_magnetostatics/mdlc_correction.cpp @@ -55,12 +55,10 @@ DLC_struct dlc_params = {1e100, 0., 0., false, 0.}; */ inline void check_gap_mdlc(const Particle &p) { if (p.p.dipm != 0.0) { - if (p.r.p[2] < 0.0) + auto const z = p.r.p[2]; + if (z < 0.0 or z > dlc_params.h) { runtimeErrorMsg() << "Particle " << p.p.identity << " entered MDLC gap " - << "region by " << (p.r.p[2]); - else if (p.r.p[2] > dlc_params.h) { - runtimeErrorMsg() << "Particle " << p.p.identity << " entered MDLC gap " - << "region by " << (p.r.p[2] - dlc_params.h); + << "region by " << z - ((z < 0.) ? 0. : dlc_params.h); } } } diff --git a/src/core/electrostatics_magnetostatics/mdlc_correction.hpp b/src/core/electrostatics_magnetostatics/mdlc_correction.hpp index 542fdebecbf..166baa7731e 100644 --- a/src/core/electrostatics_magnetostatics/mdlc_correction.hpp +++ b/src/core/electrostatics_magnetostatics/mdlc_correction.hpp @@ -18,6 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_DLC_DIPOLAR_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_DLC_DIPOLAR_HPP /** \file * main header-file for MDLC (magnetic dipolar layer correction). * @@ -38,14 +40,12 @@ * Limitations: at this moment it is restricted to work with 1 cpu */ -#ifndef _DLC_DIPOLAR_H -#define _DLC_DIPOLAR_H - #include "config.hpp" -#include #ifdef DIPOLES +#include + /** parameters for the MDLC method */ struct DLC_struct { /** maximal pairwise error of the potential and force */ diff --git a/src/core/electrostatics_magnetostatics/mmm-common.hpp b/src/core/electrostatics_magnetostatics/mmm-common.hpp index c7d6f503e54..f25f76a7a6d 100644 --- a/src/core/electrostatics_magnetostatics/mmm-common.hpp +++ b/src/core/electrostatics_magnetostatics/mmm-common.hpp @@ -27,8 +27,8 @@ * directly from @cite abramowitz65a. For details, see @cite arnold02a. 
*/ -#ifndef MMM_COMMON_H -#define MMM_COMMON_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_MMM_COMMON_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_MMM_COMMON_HPP #include "mmm-modpsi.hpp" diff --git a/src/core/electrostatics_magnetostatics/mmm1d.cpp b/src/core/electrostatics_magnetostatics/mmm1d.cpp index 44194045274..035354e0ab9 100644 --- a/src/core/electrostatics_magnetostatics/mmm1d.cpp +++ b/src/core/electrostatics_magnetostatics/mmm1d.cpp @@ -35,7 +35,7 @@ #include "electrostatics_magnetostatics/mmm-common.hpp" #include "electrostatics_magnetostatics/mmm-modpsi.hpp" -#include "cells.hpp" +#include "CellStructureType.hpp" #include "errorhandling.hpp" #include "grid.hpp" #include "specfunc.hpp" @@ -147,16 +147,17 @@ void MMM1D_set_params(double switch_rad, double maxPWerror) { mpi_bcast_coulomb_params(); } -int MMM1D_sanity_checks() { +bool MMM1D_sanity_checks() { if (box_geo.periodic(0) || box_geo.periodic(1) || !box_geo.periodic(2)) { runtimeErrorMsg() << "MMM1D requires periodicity (0, 0, 1)"; - return ES_ERROR; + return true; } - if (cell_structure.decomposition_type() != CELL_STRUCTURE_NSQUARE) { + if (local_geo.cell_structure_type() != + CellStructureType::CELL_STRUCTURE_NSQUARE) { runtimeErrorMsg() << "MMM1D requires the N-square cellsystem"; - return ES_ERROR; + return true; } - return ES_OK; + return false; } int MMM1D_init() { @@ -268,9 +269,9 @@ void add_mmm1d_coulomb_pair_force(double chpref, Utils::Vector3d const &d, } double mmm1d_coulomb_pair_energy(double const chpref, Utils::Vector3d const &d, - double r2, double r) { + double r) { if (chpref == 0) - return 0; + return 0.; constexpr auto c_2pi = 2 * Utils::pi(); auto const n_modPsi = static_cast(modPsi.size() >> 1); diff --git a/src/core/electrostatics_magnetostatics/mmm1d.hpp b/src/core/electrostatics_magnetostatics/mmm1d.hpp index f6c90d99570..38c7a441998 100644 --- a/src/core/electrostatics_magnetostatics/mmm1d.hpp +++ b/src/core/electrostatics_magnetostatics/mmm1d.hpp @@ -60,7 +60,7 @@ extern MMM1DParameters mmm1d_params; void MMM1D_set_params(double switch_rad, double maxPWerror); /// check that MMM1D can run with the current parameters -int MMM1D_sanity_checks(); +bool MMM1D_sanity_checks(); /// initialize the MMM1D constants int MMM1D_init(); @@ -69,7 +69,7 @@ void add_mmm1d_coulomb_pair_force(double chpref, Utils::Vector3d const &d, double r, Utils::Vector3d &force); double mmm1d_coulomb_pair_energy(double q1q2, Utils::Vector3d const &d, - double r2, double r); + double r); /** Tuning of the parameters which are not set by the user. Tune either the * @ref MMM1DParameters::far_switch_radius_2 "switching radius" or the diff --git a/src/core/electrostatics_magnetostatics/p3m-common.hpp b/src/core/electrostatics_magnetostatics/p3m-common.hpp index 954e646e870..c5642389010 100644 --- a/src/core/electrostatics_magnetostatics/p3m-common.hpp +++ b/src/core/electrostatics_magnetostatics/p3m-common.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _P3M_COMMON_H -#define _P3M_COMMON_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_COMMON_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_COMMON_HPP /** \file * Common functions for dipolar and charge P3M. * @@ -205,7 +205,7 @@ namespace detail { * \ldots, -1\right) @f$. 
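 * In other words, this is the usual FFT frequency ordering: a grid index in
 * the lower half of the mesh keeps its value as wave number, while an index
 * in the upper half is shifted down by the mesh size. When
 * @p zero_out_midpoint is set, the Nyquist element is zeroed; this is the
 * convention used when the differential operator @c d_op is assembled.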
*/ std::array, 3> inline calc_meshift( - std::array const &mesh_size, bool zero_out_midpoint = false) { + int const mesh_size[3], bool zero_out_midpoint = false) { std::array, 3> ret{}; for (std::size_t i = 0; i < 3; i++) { @@ -223,4 +223,4 @@ std::array, 3> inline calc_meshift( } } // namespace detail -#endif /* _P3M_COMMON_H */ +#endif /* CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_COMMON_HPP */ diff --git a/src/core/electrostatics_magnetostatics/p3m-data_struct.hpp b/src/core/electrostatics_magnetostatics/p3m-data_struct.hpp index 8ed57989090..1840660a2de 100644 --- a/src/core/electrostatics_magnetostatics/p3m-data_struct.hpp +++ b/src/core/electrostatics_magnetostatics/p3m-data_struct.hpp @@ -49,8 +49,7 @@ struct p3m_data_struct_base { * i.e. the prefactor @f$ 2i\pi/L @f$ is missing! */ void calc_differential_operator() { - d_op = detail::calc_meshift( - {params.mesh[0], params.mesh[1], params.mesh[2]}, true); + d_op = detail::calc_meshift(params.mesh, true); } }; diff --git a/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp b/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp index c068a228583..30a7367dcd0 100644 --- a/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp +++ b/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp @@ -45,6 +45,7 @@ #include "electrostatics_magnetostatics/p3m_interpolation.hpp" #include "electrostatics_magnetostatics/p3m_send_mesh.hpp" +#include "CellStructureType.hpp" #include "Particle.hpp" #include "ParticleRange.hpp" #include "cells.hpp" @@ -115,9 +116,8 @@ static void dp3m_calc_influence_function_energy(); */ static void dp3m_compute_constants_energy_dipolar(); -static double dp3m_k_space_error(double box_size, double prefac, int mesh, - int cao, int n_c_part, double sum_q2, - double alpha_L); +static double dp3m_k_space_error(double box_size, int mesh, int cao, + int n_c_part, double sum_q2, double alpha_L); /**@}*/ /** Compute the dipolar surface terms */ @@ -128,8 +128,8 @@ static double calc_surface_term(bool force_flag, bool energy_flag, /************************************************************/ /**@{*/ -double dp3m_real_space_error(double box_size, double prefac, double r_cut_iL, - int n_c_part, double sum_q2, double alpha_L); +double dp3m_real_space_error(double box_size, double r_cut_iL, int n_c_part, + double sum_q2, double alpha_L); static void dp3m_tune_aliasing_sums(int nx, int ny, int nz, int mesh, double mesh_i, int cao, double alpha_L_i, double *alias1, double *alias2); @@ -137,9 +137,9 @@ static void dp3m_tune_aliasing_sums(int nx, int ny, int nz, int mesh, /** Compute the value of alpha through a bisection method. * Based on eq. (33) @cite wang01a. */ -double dp3m_rtbisection(double box_size, double prefac, double r_cut_iL, - int n_c_part, double sum_q2, double x1, double x2, - double xacc, double tuned_accuracy); +double dp3m_rtbisection(double box_size, double r_cut_iL, int n_c_part, + double sum_q2, double x1, double x2, double xacc, + double tuned_accuracy); /**@}*/ @@ -380,7 +380,7 @@ template struct AssignForces { double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag, const ParticleRange &particles) { - int i, d, d_rs, ind, j[3]; + int i, ind, j[3]; /* k-space energy */ double k_space_energy_dip = 0.0; double tmp0, tmp1; @@ -391,9 +391,9 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag, if (dp3m.sum_mu2 > 0) { /* Gather information for FFT grid inside the nodes domain (inner local * mesh) and perform forward 3D FFT (Charge Assignment Mesh). 
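     * Note on the initializers just below: std::array is an aggregate that
     * wraps a built-in array, so a fully braced initializer takes two brace
     * pairs, e.g. std::array<double *, 3> ptrs = {{a, b, c}} (with
     * hypothetical pointers a, b, c); the single-brace form relies on brace
     * elision and can trigger -Wmissing-braces.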
*/ - std::array meshes = {dp3m.rs_mesh_dip[0].data(), - dp3m.rs_mesh_dip[1].data(), - dp3m.rs_mesh_dip[2].data()}; + std::array meshes = {{dp3m.rs_mesh_dip[0].data(), + dp3m.rs_mesh_dip[1].data(), + dp3m.rs_mesh_dip[2].data()}}; dp3m.sm.gather_grid(Utils::make_span(meshes), comm_cart, dp3m.local_mesh.dim); @@ -508,8 +508,8 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag, } /* Force component loop */ - for (d = 0; d < 3; d++) { - d_rs = (d + dp3m.ks_pnum) % 3; + for (int d = 0; d < 3; d++) { + auto const d_rs = (d + dp3m.ks_pnum) % 3; ind = 0; for (j[0] = 0; j[0] < dp3m.fft.plan[3].new_mesh[0]; j[0]++) { for (j[1] = 0; j[1] < dp3m.fft.plan[3].new_mesh[1]; j[1]++) { @@ -576,8 +576,8 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag, } /* Force component loop */ - for (d = 0; d < 3; d++) { /* direction in k-space: */ - d_rs = (d + dp3m.ks_pnum) % 3; + for (int d = 0; d < 3; d++) { /* direction in k-space: */ + auto const d_rs = (d + dp3m.ks_pnum) % 3; ind = 0; for (j[0] = 0; j[0] < dp3m.fft.plan[3].new_mesh[0]; j[0]++) { // j[0]=n_y @@ -614,9 +614,9 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag, fft_perform_back(dp3m.rs_mesh_dip[2].data(), false, dp3m.fft, comm_cart); /* redistribute force component mesh */ - std::array meshes = {dp3m.rs_mesh_dip[0].data(), - dp3m.rs_mesh_dip[1].data(), - dp3m.rs_mesh_dip[2].data()}; + std::array meshes = {{dp3m.rs_mesh_dip[0].data(), + dp3m.rs_mesh_dip[1].data(), + dp3m.rs_mesh_dip[2].data()}}; dp3m.sm.spread_grid(Utils::make_span(meshes), comm_cart, dp3m.local_mesh.dim); @@ -748,16 +748,15 @@ double dp3m_get_accuracy(int mesh, int cao, double r_cut_iL, double *_alpha_L, // Alpha cannot be zero in the dipolar case because real_space formula breaks // down - rs_err = - dp3m_real_space_error(box_geo.length()[0], dipole.prefactor, r_cut_iL, - dp3m.sum_dip_part, dp3m.sum_mu2, 0.001); + rs_err = dp3m_real_space_error(box_geo.length()[0], r_cut_iL, + dp3m.sum_dip_part, dp3m.sum_mu2, 0.001); if (Utils::sqrt_2() * rs_err > dp3m.params.accuracy) { /* assume rs_err = ks_err -> rs_err = accuracy/sqrt(2.0) -> alpha_L */ - alpha_L = dp3m_rtbisection( - box_geo.length()[0], dipole.prefactor, r_cut_iL, dp3m.sum_dip_part, - dp3m.sum_mu2, 0.0001 * box_geo.length()[0], 5.0 * box_geo.length()[0], - 0.0001, dp3m.params.accuracy); + alpha_L = dp3m_rtbisection(box_geo.length()[0], r_cut_iL, dp3m.sum_dip_part, + dp3m.sum_mu2, 0.0001 * box_geo.length()[0], + 5.0 * box_geo.length()[0], 0.0001, + dp3m.params.accuracy); if (alpha_L == -DP3M_RTBISECTION_ERROR) { *_rs_err = -1; *_ks_err = -1; @@ -774,11 +773,10 @@ double dp3m_get_accuracy(int mesh, int cao, double r_cut_iL, double *_alpha_L, *_alpha_L = alpha_L; /* calculate real space and k-space error for this alpha_L */ - rs_err = - dp3m_real_space_error(box_geo.length()[0], dipole.prefactor, r_cut_iL, - dp3m.sum_dip_part, dp3m.sum_mu2, alpha_L); - ks_err = dp3m_k_space_error(box_geo.length()[0], dipole.prefactor, mesh, cao, - dp3m.sum_dip_part, dp3m.sum_mu2, alpha_L); + rs_err = dp3m_real_space_error(box_geo.length()[0], r_cut_iL, + dp3m.sum_dip_part, dp3m.sum_mu2, alpha_L); + ks_err = dp3m_k_space_error(box_geo.length()[0], mesh, cao, dp3m.sum_dip_part, + dp3m.sum_mu2, alpha_L); *_rs_err = rs_err; *_ks_err = ks_err; @@ -1246,9 +1244,8 @@ void dp3m_count_magnetic_particles() { REGISTER_CALLBACK(dp3m_count_magnetic_particles) /** Calculate the k-space error of dipolar-P3M */ -static double dp3m_k_space_error(double box_size, double prefac, int mesh, - int cao, int n_c_part, 
double sum_q2, - double alpha_L) { +static double dp3m_k_space_error(double box_size, int mesh, int cao, + int n_c_part, double sum_q2, double alpha_L) { double he_q = 0.0; auto const mesh_i = 1. / mesh; auto const alpha_L_i = 1. / alpha_L; @@ -1313,8 +1310,8 @@ void dp3m_tune_aliasing_sums(int nx, int ny, int nz, int mesh, double mesh_i, * Please note that in this more refined approach we don't use * eq. (37), but eq. (33) which maintains all the powers in alpha. */ -double dp3m_real_space_error(double box_size, double prefac, double r_cut_iL, - int n_c_part, double sum_q2, double alpha_L) { +double dp3m_real_space_error(double box_size, double r_cut_iL, int n_c_part, + double sum_q2, double alpha_L) { double d_error_f, d_cc, d_dc, d_rcut2, d_con; double d_a2, d_c, d_RCUT; @@ -1346,18 +1343,18 @@ double dp3m_real_space_error(double box_size, double prefac, double r_cut_iL, * known to lie between x1 and x2. The root, returned as rtbis, will be * refined until its accuracy is \f$\pm\f$ @p xacc. */ -double dp3m_rtbisection(double box_size, double prefac, double r_cut_iL, - int n_c_part, double sum_q2, double x1, double x2, - double xacc, double tuned_accuracy) { +double dp3m_rtbisection(double box_size, double r_cut_iL, int n_c_part, + double sum_q2, double x1, double x2, double xacc, + double tuned_accuracy) { constexpr int JJ_RTBIS_MAX = 40; auto const constant = tuned_accuracy / Utils::sqrt_2(); auto const f1 = - dp3m_real_space_error(box_size, prefac, r_cut_iL, n_c_part, sum_q2, x1) - + dp3m_real_space_error(box_size, r_cut_iL, n_c_part, sum_q2, x1) - constant; auto const f2 = - dp3m_real_space_error(box_size, prefac, r_cut_iL, n_c_part, sum_q2, x2) - + dp3m_real_space_error(box_size, r_cut_iL, n_c_part, sum_q2, x2) - constant; if (f1 * f2 >= 0.0) { runtimeErrorMsg() @@ -1369,9 +1366,9 @@ double dp3m_rtbisection(double box_size, double prefac, double r_cut_iL, double rtb = f1 < 0.0 ? 
(dx = x2 - x1, x1) : (dx = x1 - x2, x2); for (int j = 1; j <= JJ_RTBIS_MAX; j++) { auto const xmid = rtb + (dx *= 0.5); - auto const fmid = dp3m_real_space_error(box_size, prefac, r_cut_iL, - n_c_part, sum_q2, xmid) - - constant; + auto const fmid = + dp3m_real_space_error(box_size, r_cut_iL, n_c_part, sum_q2, xmid) - + constant; if (fmid <= 0.0) rtb = xmid; if (fabs(dx) < xacc || fmid == 0.0) @@ -1421,13 +1418,14 @@ bool dp3m_sanity_checks(const Utils::Vector3i &grid) { bool ret = false; if (!box_geo.periodic(0) || !box_geo.periodic(1) || !box_geo.periodic(2)) { - runtimeErrorMsg() << "dipolar P3M requires periodicity 1 1 1"; + runtimeErrorMsg() << "dipolar P3M requires periodicity (1, 1, 1)"; ret = true; } - if (cell_structure.decomposition_type() != CELL_STRUCTURE_DOMDEC) { - runtimeErrorMsg() << "dipolar P3M at present requires the domain " - "decomposition cell system"; + if (local_geo.cell_structure_type() != + CellStructureType::CELL_STRUCTURE_REGULAR) { + runtimeErrorMsg() << "dipolar P3M requires the regular decomposition " + "cell system"; ret = true; } @@ -1443,8 +1441,7 @@ bool dp3m_sanity_checks(const Utils::Vector3i &grid) { ret = true; } - if (dp3m_sanity_checks_boxl()) - ret = true; + ret |= dp3m_sanity_checks_boxl(); if (dp3m.params.mesh[0] == 0) { runtimeErrorMsg() << "dipolar P3M_init: mesh size is not yet set"; diff --git a/src/core/electrostatics_magnetostatics/p3m-dipolar.hpp b/src/core/electrostatics_magnetostatics/p3m-dipolar.hpp index 1053bdcdddb..d74b3ddd2a0 100644 --- a/src/core/electrostatics_magnetostatics/p3m-dipolar.hpp +++ b/src/core/electrostatics_magnetostatics/p3m-dipolar.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _P3M_MAGNETOSTATICS_H -#define _P3M_MAGNETOSTATICS_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_DIPOLAR_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_DIPOLAR_HPP /** \file * P3M algorithm for long range magnetic dipole-dipole interaction. 
* @@ -284,4 +284,4 @@ inline double dp3m_pair_energy(Particle const &p1, Particle const &p2, } #endif /* DP3M */ -#endif /* _P3M_DIPOLES_H */ +#endif /* CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_DIPOLAR_HPP */ diff --git a/src/core/electrostatics_magnetostatics/p3m.cpp b/src/core/electrostatics_magnetostatics/p3m.cpp index 933da1cf80f..31dad4964cd 100644 --- a/src/core/electrostatics_magnetostatics/p3m.cpp +++ b/src/core/electrostatics_magnetostatics/p3m.cpp @@ -31,6 +31,7 @@ #include "electrostatics_magnetostatics/elc.hpp" #include "electrostatics_magnetostatics/p3m_influence_function.hpp" +#include "CellStructureType.hpp" #include "Particle.hpp" #include "ParticleRange.hpp" #include "cells.hpp" @@ -69,8 +70,6 @@ #include #include -using Utils::sinc; - p3m_data_struct p3m; /** \name Private Functions */ @@ -532,7 +531,7 @@ double p3m_calc_kspace_forces(bool force_flag, bool energy_flag, { std::array E_fields = { - p3m.E_mesh[0].data(), p3m.E_mesh[1].data(), p3m.E_mesh[2].data()}; + {p3m.E_mesh[0].data(), p3m.E_mesh[1].data(), p3m.E_mesh[2].data()}}; /* redistribute force component mesh */ p3m.sm.spread_grid(Utils::make_span(E_fields), comm_cart, p3m.local_mesh.dim); @@ -1218,6 +1217,7 @@ double p3m_k_space_error(double prefac, const int mesh[3], int cao, void p3m_tune_aliasing_sums(int nx, int ny, int nz, const int mesh[3], const double mesh_i[3], int cao, double alpha_L_i, double *alias1, double *alias2) { + using Utils::sinc; auto const factor1 = Utils::sqr(Utils::pi() * alpha_L_i); @@ -1283,13 +1283,13 @@ bool p3m_sanity_checks_system(const Utils::Vector3i &grid) { bool ret = false; if (!box_geo.periodic(0) || !box_geo.periodic(1) || !box_geo.periodic(2)) { - runtimeErrorMsg() << "P3M requires periodicity 1 1 1"; + runtimeErrorMsg() << "P3M requires periodicity (1, 1, 1)"; ret = true; } - if (cell_structure.decomposition_type() != CELL_STRUCTURE_DOMDEC) { - runtimeErrorMsg() - << "P3M at present requires the domain decomposition cell system"; + if (local_geo.cell_structure_type() != + CellStructureType::CELL_STRUCTURE_REGULAR) { + runtimeErrorMsg() << "P3M requires the regular decomposition cell system"; ret = true; } diff --git a/src/core/electrostatics_magnetostatics/p3m.hpp b/src/core/electrostatics_magnetostatics/p3m.hpp index 21fcda2a134..7312ef07b26 100644 --- a/src/core/electrostatics_magnetostatics/p3m.hpp +++ b/src/core/electrostatics_magnetostatics/p3m.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _P3M_H -#define _P3M_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_HPP /** \file * P3M algorithm for long range Coulomb interaction. * @@ -242,6 +242,6 @@ inline double p3m_pair_energy(double chgfac, double dist) { return 0.0; } -#endif /* of ifdef P3M */ +#endif /* P3M */ -#endif /*of ifndef P3M_H */ +#endif /* CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_HPP */ diff --git a/src/core/electrostatics_magnetostatics/p3m_gpu.hpp b/src/core/electrostatics_magnetostatics/p3m_gpu.hpp index 1b6a9633d9c..f173a74c945 100644 --- a/src/core/electrostatics_magnetostatics/p3m_gpu.hpp +++ b/src/core/electrostatics_magnetostatics/p3m_gpu.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _P3M_GPU_H -#define _P3M_GPU_H +#ifndef CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_GPU_HPP +#define CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_GPU_HPP /** \file * P3M electrostatics on GPU. 
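 * The two entry points below split the work: p3m_gpu_init() prepares the GPU
 * mesh for the given charge assignment order @c cao, mesh size and Ewald
 * splitting parameter @c alpha, while p3m_gpu_add_farfield_force() adds the
 * far-field (k-space) contribution to the particle forces.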
* @@ -27,4 +27,4 @@ void p3m_gpu_init(int cao, const int mesh[3], double alpha); void p3m_gpu_add_farfield_force(); -#endif /* _P3M_GPU_H */ +#endif /* CORE_ELECTROSTATICS_MAGNETOSTATICS_P3M_GPU_HPP */ diff --git a/src/core/electrostatics_magnetostatics/p3m_gpu_cuda.cu b/src/core/electrostatics_magnetostatics/p3m_gpu_cuda.cu index b03339e7927..8ae7a05e8bf 100644 --- a/src/core/electrostatics_magnetostatics/p3m_gpu_cuda.cu +++ b/src/core/electrostatics_magnetostatics/p3m_gpu_cuda.cu @@ -28,10 +28,10 @@ #ifdef ELECTROSTATICS -#define _P3M_GPU_FLOAT -//#define _P3M_GPU_REAL_DOUBLE +#define P3M_GPU_FLOAT +//#define P3M_GPU_REAL_DOUBLE -#ifdef _P3M_GPU_FLOAT +#ifdef P3M_GPU_FLOAT #define REAL_TYPE float #define FFT_TYPE_COMPLEX cufftComplex #define FFT_FORW_FFT cufftExecR2C @@ -40,7 +40,7 @@ #define FFT_PLAN_BACK_FLAG CUFFT_C2R #endif -#ifdef _P3M_GPU_REAL_DOUBLE +#ifdef P3M_GPU_REAL_DOUBLE #define REAL_TYPE double #define FFT_TYPE_COMPLEX cufftDoubleComplex #define FFT_FORW_FFT cufftExecD2Z @@ -198,7 +198,7 @@ __global__ void calculate_influence_function_device(const P3MGpuData p) { } } -#ifdef _P3M_GPU_REAL_DOUBLE +#ifdef P3M_GPU_REAL_DOUBLE __device__ double atomicAdd(double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int *)address; unsigned long long int old = *address_as_ull, assumed; diff --git a/src/core/electrostatics_magnetostatics/p3m_influence_function.hpp b/src/core/electrostatics_magnetostatics/p3m_influence_function.hpp index bfc4392b704..da1eb763d6a 100644 --- a/src/core/electrostatics_magnetostatics/p3m_influence_function.hpp +++ b/src/core/electrostatics_magnetostatics/p3m_influence_function.hpp @@ -132,8 +132,7 @@ std::vector grid_influence_function(const P3MParameters ¶ms, const Utils::Vector3d &box_l) { using namespace detail::FFT_indexing; - auto const shifts = - detail::calc_meshift({params.mesh[0], params.mesh[1], params.mesh[2]}); + auto const shifts = detail::calc_meshift(params.mesh); auto const size = n_end - n_start; diff --git a/src/core/electrostatics_magnetostatics/specfunc.cpp b/src/core/electrostatics_magnetostatics/specfunc.cpp index 027e820a6cd..496fbc32eeb 100644 --- a/src/core/electrostatics_magnetostatics/specfunc.cpp +++ b/src/core/electrostatics_magnetostatics/specfunc.cpp @@ -247,25 +247,25 @@ double hzeta(double s, double q) { } double K0(double x) { - double c, I0; if (x <= 2.0) { - c = evaluateAsChebychevSeriesAt(bk0_cs, 0.5 * x * x - 1.0); - I0 = evaluateAsChebychevSeriesAt(bi0_cs, x * x / 4.5 - 1.0); - return (-log(x) + Utils::ln_2()) * I0 + c; + auto const c = evaluateAsChebychevSeriesAt(bk0_cs, 0.5 * x * x - 1.0); + auto const i0 = evaluateAsChebychevSeriesAt(bi0_cs, x * x / 4.5 - 1.0); + return (-log(x) + Utils::ln_2()) * i0 + c; } - c = (x <= 8.0) ? evaluateAsChebychevSeriesAt(ak0_cs, (16.0 / x - 5.0) / 3.0) + auto const c = + (x <= 8.0) ? evaluateAsChebychevSeriesAt(ak0_cs, (16.0 / x - 5.0) / 3.0) : evaluateAsChebychevSeriesAt(ak02_cs, 16.0 / x - 1.0); return exp(-x) * c / sqrt(x); } double K1(double x) { - double c, I1; if (x <= 2.0) { - c = evaluateAsChebychevSeriesAt(bk1_cs, 0.5 * x * x - 1.0); - I1 = x * evaluateAsChebychevSeriesAt(bi1_cs, x * x / 4.5 - 1.0); - return (log(x) - Utils::ln_2()) * I1 + c / x; + auto const c = evaluateAsChebychevSeriesAt(bk1_cs, 0.5 * x * x - 1.0); + auto const i1 = x * evaluateAsChebychevSeriesAt(bi1_cs, x * x / 4.5 - 1.0); + return (log(x) - Utils::ln_2()) * i1 + c / x; } - c = (x <= 8.0) ? 
evaluateAsChebychevSeriesAt(ak1_cs, (16.0 / x - 5.0) / 3.0) + auto const c = + (x <= 8.0) ? evaluateAsChebychevSeriesAt(ak1_cs, (16.0 / x - 5.0) / 3.0) : evaluateAsChebychevSeriesAt(ak12_cs, 16.0 / x - 1.0); return exp(-x) * c / sqrt(x); } @@ -405,15 +405,15 @@ double LPK1(double x) { std::pair LPK01(double x) { if (x >= 27.) { auto const tmp = .5 * exp(-x) / sqrt(x); - auto const K0 = tmp * ak0_cs[0]; - auto const K1 = tmp * ak1_cs[0]; - return {K0, K1}; + auto const k0 = tmp * ak0_cs[0]; + auto const k1 = tmp * ak1_cs[0]; + return {k0, k1}; } if (x >= 23.) { auto const tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.; - auto const K0 = tmp * (xx * ak0_cs[1] + 0.5 * ak0_cs[0]); - auto const K1 = tmp * (xx * ak1_cs[1] + 0.5 * ak1_cs[0]); - return {K0, K1}; + auto const k0 = tmp * (xx * ak0_cs[1] + 0.5 * ak0_cs[0]); + auto const k1 = tmp * (xx * ak1_cs[1] + 0.5 * ak1_cs[0]); + return {k0, k1}; } if (x > 2) { int j = ak01_orders[((int)x) - 2]; @@ -440,9 +440,9 @@ std::pair LPK01(double x) { dd1 = tmp1; } auto const tmp = exp(-x) / sqrt(x); - auto const K0 = tmp * (0.5 * (s0[0] + x2 * d0) - dd0); - auto const K1 = tmp * (0.5 * (s1[0] + x2 * d1) - dd1); - return {K0, K1}; + auto const k0 = tmp * (0.5 * (s0[0] + x2 * d0) - dd0); + auto const k1 = tmp * (0.5 * (s1[0] + x2 * d1) - dd1); + return {k0, k1}; } /* x <= 2 */ { @@ -461,8 +461,8 @@ std::pair LPK01(double x) { dd1 = tmp1; } auto const tmp = log(x) - Utils::ln_2(); - auto K0 = -tmp * (0.5 * (bi0_cs[0] + x2 * d0) - dd0); - auto K1 = x * tmp * (0.5 * (bi1_cs[0] + x2 * d1) - dd1); + auto k0 = -tmp * (0.5 * (bi0_cs[0] + x2 * d0) - dd0); + auto k1 = x * tmp * (0.5 * (bi1_cs[0] + x2 * d1) - dd1); /* K0/K1 correction */ j = 9; @@ -478,8 +478,8 @@ std::pair LPK01(double x) { dd0 = tmp0; dd1 = tmp1; } - K0 += (0.5 * (x2 * d0 + bk0_cs[0]) - dd0); - K1 += (0.5 * (x2 * d1 + bk1_cs[0]) - dd1) / x; - return {K0, K1}; + k0 += (0.5 * (x2 * d0 + bk0_cs[0]) - dd0); + k1 += (0.5 * (x2 * d1 + bk1_cs[0]) - dd1) / x; + return {k0, k1}; } } diff --git a/src/core/energy.hpp b/src/core/energy.hpp index d9ad254c7a2..d6e40ca947e 100644 --- a/src/core/energy.hpp +++ b/src/core/energy.hpp @@ -18,15 +18,14 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef CORE_ENERGY_HPP +#define CORE_ENERGY_HPP /** \file * Energy calculation. * * Implementation in energy.cpp. */ -#ifndef _ENERGY_H -#define _ENERGY_H - #include "Observable_stat.hpp" #include "actor/ActorList.hpp" diff --git a/src/core/energy_inline.hpp b/src/core/energy_inline.hpp index 41cc2fa565b..6ec95207668 100644 --- a/src/core/energy_inline.hpp +++ b/src/core/energy_inline.hpp @@ -18,11 +18,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef CORE_ENERGY_INLINE_HPP +#define CORE_ENERGY_INLINE_HPP /** \file - * Implementation of the energy calculation. + * Energy calculation. 
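 * The kinetic contribution evaluated further below follows the textbook form
 * @f$ E_\mathrm{kin} = \frac{1}{2} m \vec{v}^2
 *     + \frac{1}{2} \sum_k I_k \omega_k^2 @f$,
 * where the rotational term only exists with the ROTATION feature and
 * virtual sites carry no translational kinetic energy.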
*/ -#ifndef ENERGY_INLINE_HPP -#define ENERGY_INLINE_HPP #include "config.hpp" @@ -180,19 +180,19 @@ inline void add_non_bonded_pair_energy(Particle const &p1, Particle const &p2, Utils::Vector3d const &d, double const dist, double const dist2, Observable_stat &obs_energy) { - IA_parameters const &ia_params = *get_ia_param(p1.p.type, p2.p.type); + IA_parameters const &ia_params = *get_ia_param(p1.type(), p2.type()); #ifdef EXCLUSIONS if (do_nonbonded(p1, p2)) #endif obs_energy.add_non_bonded_contribution( - p1.p.type, p2.p.type, + p1.type(), p2.type(), calc_non_bonded_pair_energy(p1, p2, ia_params, d, dist)); #ifdef ELECTROSTATICS if (!obs_energy.coulomb.empty()) obs_energy.coulomb[0] += - Coulomb::pair_energy(p1, p2, p1.p.q * p2.p.q, d, dist, dist2); + Coulomb::pair_energy(p1, p2, p1.q() * p2.q(), d, dist); #endif #ifdef DIPOLES @@ -204,14 +204,14 @@ inline void add_non_bonded_pair_energy(Particle const &p1, Particle const &p2, inline boost::optional calc_bonded_energy(Bonded_IA_Parameters const &iaparams, Particle const &p1, Utils::Span partners) { - auto const n_partners = partners.size(); + auto const n_partners = static_cast(partners.size()); auto p2 = (n_partners > 0) ? partners[0] : nullptr; auto p3 = (n_partners > 1) ? partners[1] : nullptr; auto p4 = (n_partners > 2) ? partners[2] : nullptr; if (n_partners == 1) { - auto const dx = box_geo.get_mi_vector(p1.r.p, p2->r.p); + auto const dx = box_geo.get_mi_vector(p1.pos(), p2->pos()); if (auto const *iap = boost::get(&iaparams)) { return iap->energy(dx); } @@ -223,15 +223,15 @@ calc_bonded_energy(Bonded_IA_Parameters const &iaparams, Particle const &p1, } #ifdef ELECTROSTATICS if (auto const *iap = boost::get(&iaparams)) { - return iap->energy(p1.p.q * p2->p.q, dx); + return iap->energy(p1.q() * p2->q(), dx); } if (auto const *iap = boost::get(&iaparams)) { return iap->energy(p1, *p2, dx); } #endif #ifdef BOND_CONSTRAINT - if (auto const *iap = boost::get(&iaparams)) { - return boost::optional(0); + if (boost::get(&iaparams)) { + return {0.}; } #endif #ifdef TABULATED @@ -239,25 +239,25 @@ calc_bonded_energy(Bonded_IA_Parameters const &iaparams, Particle const &p1, return iap->energy(dx); } #endif - if (auto const *iap = boost::get(&iaparams)) { - return boost::optional(0); + if (boost::get(&iaparams)) { + return {0.}; } throw BondUnknownTypeError(); } // 1 partner if (n_partners == 2) { if (auto const *iap = boost::get(&iaparams)) { - return iap->energy(p1.r.p, p2->r.p, p3->r.p); + return iap->energy(p1.pos(), p2->pos(), p3->pos()); } if (auto const *iap = boost::get(&iaparams)) { - return iap->energy(p1.r.p, p2->r.p, p3->r.p); + return iap->energy(p1.pos(), p2->pos(), p3->pos()); } if (auto const *iap = boost::get(&iaparams)) { - return iap->energy(p1.r.p, p2->r.p, p3->r.p); + return iap->energy(p1.pos(), p2->pos(), p3->pos()); } if (auto const *iap = boost::get(&iaparams)) { - return iap->energy(p1.r.p, p2->r.p, p3->r.p); + return iap->energy(p1.pos(), p2->pos(), p3->pos()); } - if (auto const *iap = boost::get(&iaparams)) { + if (boost::get(&iaparams)) { runtimeWarningMsg() << "Unsupported bond type " + std::to_string(iaparams.which()) + " in energy calculation."; @@ -267,12 +267,12 @@ calc_bonded_energy(Bonded_IA_Parameters const &iaparams, Particle const &p1, } // 2 partners if (n_partners == 3) { if (auto const *iap = boost::get(&iaparams)) { - return iap->energy(p2->r.p, p1.r.p, p3->r.p, p4->r.p); + return iap->energy(p2->pos(), p1.pos(), p3->pos(), p4->pos()); } if (auto const *iap = boost::get(&iaparams)) { - return 
iap->energy(p2->r.p, p1.r.p, p3->r.p, p4->r.p); + return iap->energy(p2->pos(), p1.pos(), p3->pos(), p4->pos()); } - if (auto const *iap = boost::get(&iaparams)) { + if (boost::get(&iaparams)) { runtimeWarningMsg() << "Unsupported bond type " + std::to_string(iaparams.which()) + " in energy calculation."; @@ -291,7 +291,7 @@ calc_bonded_energy(Bonded_IA_Parameters const &iaparams, Particle const &p1, * @param p particle for which to calculate energies */ inline double translational_kinetic_energy(Particle const &p) { - return p.p.is_virtual ? 0. : 0.5 * p.p.mass * p.m.v.norm2(); + return p.is_virtual() ? 0. : 0.5 * p.mass() * p.v().norm2(); } /** Calculate kinetic energies from rotation for one particle. @@ -299,8 +299,8 @@ inline double translational_kinetic_energy(Particle const &p) { */ inline double rotational_kinetic_energy(Particle const &p) { #ifdef ROTATION - return p.p.rotation - ? 0.5 * (hadamard_product(p.m.omega, p.m.omega) * p.p.rinertia) + return p.can_rotate() + ? 0.5 * (hadamard_product(p.omega(), p.omega()) * p.rinertia()) : 0.0; #else return 0.0; @@ -314,4 +314,4 @@ inline double calc_kinetic_energy(Particle const &p) { return translational_kinetic_energy(p) + rotational_kinetic_energy(p); } -#endif // ENERGY_INLINE_HPP +#endif // CORE_ENERGY_INLINE_HPP diff --git a/src/core/errorhandling.cpp b/src/core/errorhandling.cpp index a96f3cdc8a3..edd346882d5 100644 --- a/src/core/errorhandling.cpp +++ b/src/core/errorhandling.cpp @@ -59,8 +59,7 @@ RuntimeErrorStream _runtimeMessageStream(RuntimeError::ErrorLevel level, const std::string &file, const int line, const std::string &function) { - return RuntimeErrorStream(*runtimeErrorCollector, level, file, line, - function); + return {*runtimeErrorCollector, level, file, line, function}; } void mpi_gather_runtime_errors_local() { diff --git a/src/core/errorhandling.hpp b/src/core/errorhandling.hpp index f12016e1d46..132783cc7e6 100644 --- a/src/core/errorhandling.hpp +++ b/src/core/errorhandling.hpp @@ -87,12 +87,12 @@ RuntimeErrorStream _runtimeMessageStream(RuntimeError::ErrorLevel level, #define runtimeErrorMsg() \ ErrorHandling::_runtimeMessageStream( \ ErrorHandling::RuntimeError::ErrorLevel::ERROR, __FILE__, __LINE__, \ - __PRETTYFUNC__) + PRETTY_FUNCTION_EXTENSION) #define runtimeWarningMsg() \ ErrorHandling::_runtimeMessageStream( \ ErrorHandling::RuntimeError::ErrorLevel::WARNING, __FILE__, __LINE__, \ - __PRETTYFUNC__) + PRETTY_FUNCTION_EXTENSION) std::vector mpi_gather_runtime_errors(); diff --git a/src/core/event.cpp b/src/core/event.cpp index 883818372fd..ecc90dff929 100644 --- a/src/core/event.cpp +++ b/src/core/event.cpp @@ -25,6 +25,7 @@ */ #include "event.hpp" +#include "CellStructureType.hpp" #include "bonded_interactions/thermalized_bond.hpp" #include "cells.hpp" #include "collision.hpp" @@ -89,8 +90,8 @@ void on_program_start() { init_node_grid(); - /* initially go for domain decomposition */ - cells_re_init(CELL_STRUCTURE_DOMDEC); + /* initially go for regular decomposition */ + cells_re_init(CellStructureType::CELL_STRUCTURE_REGULAR); if (this_node == 0) { /* make sure interaction 0<->0 always exists */ diff --git a/src/core/forces.cpp b/src/core/forces.cpp index be4be006626..3847f17d123 100644 --- a/src/core/forces.cpp +++ b/src/core/forces.cpp @@ -1,4 +1,3 @@ - /* * Copyright (C) 2010-2019 The ESPResSo project * Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010 @@ -189,8 +188,8 @@ void force_calc(CellStructure &cell_structure, double time_step, double kT) { #endif }, maximal_cutoff(), 
maximal_cutoff_bonded(), - VerletCriterion{skin, interaction_range(), coulomb_cutoff, dipole_cutoff, - collision_detection_cutoff()}); + VerletCriterion<>{skin, interaction_range(), coulomb_cutoff, + dipole_cutoff, collision_detection_cutoff()}); Constraints::constraints.add_forces(particles, get_sim_time()); diff --git a/src/core/forces_inline.hpp b/src/core/forces_inline.hpp index d906f3654e8..6e781a390da 100644 --- a/src/core/forces_inline.hpp +++ b/src/core/forces_inline.hpp @@ -18,8 +18,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _FORCES_INLINE_HPP -#define _FORCES_INLINE_HPP +#ifndef CORE_FORCES_INLINE_HPP +#define CORE_FORCES_INLINE_HPP +/** \file + * Force calculation. + */ #include "config.hpp" @@ -148,8 +151,8 @@ inline ParticleForce calc_non_bonded_pair_force(Particle const &p1, #ifdef GAY_BERNE // The gb force function isn't inlined, probably due to its size if (dist < ia_params.gay_berne.cut) { - pf += gb_pair_force(p1.r.calc_director(), p2.r.calc_director(), ia_params, - d, dist); + pf += gb_pair_force(p1.calc_director(), p2.calc_director(), ia_params, d, + dist); } #endif pf.f += force_factor * d; @@ -181,7 +184,7 @@ inline ParticleForce calc_opposing_force(ParticleForce const &pf, inline void add_non_bonded_pair_force(Particle &p1, Particle &p2, Utils::Vector3d const &d, double dist, double dist2) { - IA_parameters const &ia_params = *get_ia_param(p1.p.type, p2.p.type); + IA_parameters const &ia_params = *get_ia_param(p1.type(), p2.type()); ParticleForce pf{}; /***********************************************/ @@ -205,8 +208,8 @@ inline void add_non_bonded_pair_force(Particle &p1, Particle &p2, pf.f += std::get<0>(forces); #ifdef P3M // forces from the virtual charges - p1.f.f += std::get<1>(forces); - p2.f.f += std::get<2>(forces); + p1.force() += std::get<1>(forces); + p2.force() += std::get<2>(forces); #endif } #endif @@ -227,8 +230,8 @@ inline void add_non_bonded_pair_force(Particle &p1, Particle &p2, #ifdef DPD if (thermo_switch & THERMO_DPD) { auto const force = dpd_pair_force(p1, p2, ia_params, d, dist, dist2); - p1.f.f += force; - p2.f.f -= force; + p1.force() += force; + p2.force() -= force; } #endif @@ -271,18 +274,23 @@ calc_bond_pair_force(Particle const &p1, Particle const &p2, } #ifdef ELECTROSTATICS if (auto const *iap = boost::get(&iaparams)) { - return iap->force(p1.p.q * p2.p.q, dx); + return iap->force(p1.q() * p2.q(), dx); } if (auto const *iap = boost::get(&iaparams)) { return iap->force(dx); } #endif +#ifdef BOND_CONSTRAINT + if (boost::get(&iaparams)) { + return Utils::Vector3d{}; + } +#endif #ifdef TABULATED if (auto const *iap = boost::get(&iaparams)) { return iap->force(dx); } #endif - if (boost::get(&iaparams) || boost::get(&iaparams)) { + if (boost::get(&iaparams)) { return Utils::Vector3d{}; } throw BondUnknownTypeError(); @@ -290,22 +298,22 @@ calc_bond_pair_force(Particle const &p1, Particle const &p2, inline bool add_bonded_two_body_force(Bonded_IA_Parameters const &iaparams, Particle &p1, Particle &p2) { - auto const dx = box_geo.get_mi_vector(p1.r.p, p2.r.p); + auto const dx = box_geo.get_mi_vector(p1.pos(), p2.pos()); if (auto const *iap = boost::get(&iaparams)) { auto result = iap->forces(p1, p2, dx); if (result) { using std::get; - p1.f.f += get<0>(result.get()); - p2.f.f += get<1>(result.get()); + p1.force() += get<0>(result.get()); + p2.force() += get<1>(result.get()); return false; } } else { auto result = calc_bond_pair_force(p1, p2, iaparams, dx); if 
(result) { - p1.f.f += result.get(); - p2.f.f -= result.get(); + p1.force() += result.get(); + p2.force() -= result.get(); #ifdef NPT npt_add_virial_force_contribution(result.get(), dx); @@ -322,17 +330,17 @@ calc_bonded_three_body_force(Bonded_IA_Parameters const &iaparams, Particle const &p1, Particle const &p2, Particle const &p3) { if (auto const *iap = boost::get(&iaparams)) { - return iap->forces(p1.r.p, p2.r.p, p3.r.p); + return iap->forces(p1.pos(), p2.pos(), p3.pos()); } if (auto const *iap = boost::get(&iaparams)) { - return iap->forces(p1.r.p, p2.r.p, p3.r.p); + return iap->forces(p1.pos(), p2.pos(), p3.pos()); } if (auto const *iap = boost::get(&iaparams)) { - return iap->forces(p1.r.p, p2.r.p, p3.r.p); + return iap->forces(p1.pos(), p2.pos(), p3.pos()); } #ifdef TABULATED if (auto const *iap = boost::get(&iaparams)) { - return iap->forces(p1.r.p, p2.r.p, p3.r.p); + return iap->forces(p1.pos(), p2.pos(), p3.pos()); } #endif if (auto const *iap = boost::get(&iaparams)) { @@ -344,7 +352,7 @@ calc_bonded_three_body_force(Bonded_IA_Parameters const &iaparams, inline bool add_bonded_three_body_force(Bonded_IA_Parameters const &iaparams, Particle &p1, Particle &p2, Particle &p3) { - if (auto const *iap = boost::get(&iaparams)) { + if (boost::get(&iaparams)) { return false; } auto const result = calc_bonded_three_body_force(iaparams, p1, p2, p3); @@ -352,9 +360,9 @@ inline bool add_bonded_three_body_force(Bonded_IA_Parameters const &iaparams, using std::get; auto const &forces = result.get(); - p1.f.f += get<0>(forces); - p2.f.f += get<1>(forces); - p3.f.f += get<2>(forces); + p1.force() += get<0>(forces); + p2.force() += get<1>(forces); + p3.force() += get<2>(forces); return false; } @@ -373,11 +381,11 @@ calc_bonded_four_body_force(Bonded_IA_Parameters const &iaparams, return iap->calc_forces(p1, p2, p3, p4); } if (auto const *iap = boost::get(&iaparams)) { - return iap->forces(p2.r.p, p1.r.p, p3.r.p, p4.r.p); + return iap->forces(p2.pos(), p1.pos(), p3.pos(), p4.pos()); } #ifdef TABULATED if (auto const *iap = boost::get(&iaparams)) { - return iap->forces(p2.r.p, p1.r.p, p3.r.p, p4.r.p); + return iap->forces(p2.pos(), p1.pos(), p3.pos(), p4.pos()); } #endif throw BondUnknownTypeError(); @@ -391,10 +399,10 @@ inline bool add_bonded_four_body_force(Bonded_IA_Parameters const &iaparams, using std::get; auto const &forces = result.get(); - p1.f.f += get<0>(forces); - p2.f.f += get<1>(forces); - p3.f.f += get<2>(forces); - p4.f.f += get<3>(forces); + p1.force() += get<0>(forces); + p2.force() += get<1>(forces); + p3.force() += get<2>(forces); + p4.force() += get<3>(forces); return false; } @@ -431,4 +439,4 @@ inline bool add_bonded_force(Particle &p1, int bond_id, } } -#endif +#endif // CORE_FORCES_INLINE_HPP diff --git a/src/core/galilei.cpp b/src/core/galilei.cpp index 410e18c9cc0..72a0054fa76 100644 --- a/src/core/galilei.cpp +++ b/src/core/galilei.cpp @@ -41,7 +41,7 @@ void local_kill_particle_motion(int omega, const ParticleRange &particles) { if (omega) { p.m = {}; } else { - p.m.v = {}; + p.v() = {}; } } } @@ -52,7 +52,7 @@ void local_kill_particle_forces(int torque, const ParticleRange &particles) { if (torque) { p.f = {}; } else { - p.f.f = {}; + p.force() = {}; } } } @@ -62,11 +62,11 @@ std::pair local_system_CMS() { return boost::accumulate( cell_structure.local_particles(), std::pair{}, [](auto sum, const Particle &p) { - if (not p.p.is_virtual) { + if (not p.is_virtual()) { return std::pair{ - sum.first + - p.p.mass * unfolded_position(p.r.p, p.l.i, box_geo.length()), - 
sum.second + p.p.mass}; + sum.first + p.mass() * unfolded_position(p.pos(), p.image_box(), + box_geo.length()), + sum.second + p.mass()}; } return std::pair{sum.first, sum.second}; }); @@ -77,9 +77,9 @@ std::pair local_system_CMS_velocity() { return boost::accumulate( cell_structure.local_particles(), std::pair{}, [](auto sum, const Particle &p) { - if (not p.p.is_virtual) { + if (not p.is_virtual()) { return std::pair{ - sum.first + p.p.mass * p.m.v, sum.second + p.p.mass}; + sum.first + p.mass() * p.v(), sum.second + p.mass()}; } return std::pair{sum.first, sum.second}; }); @@ -88,7 +88,7 @@ std::pair local_system_CMS_velocity() { /** Remove the CMS velocity */ void local_galilei_transform(const Utils::Vector3d &cmsvel) { for (auto &p : cell_structure.local_particles()) { - p.m.v -= cmsvel; + p.v() -= cmsvel; } } diff --git a/src/core/ghosts.cpp b/src/core/ghosts.cpp index 626b74b2184..72b8e7fbc5e 100644 --- a/src/core/ghosts.cpp +++ b/src/core/ghosts.cpp @@ -132,7 +132,7 @@ static void prepare_send_buffer(CommBuf &send_buffer, /* put in data */ for (auto part_list : ghost_comm.part_lists) { if (data_parts & GHOSTTRANS_PARTNUM) { - int np = part_list->size(); + int np = static_cast(part_list->size()); archiver << np; } else { for (Particle &part : *part_list) { @@ -153,7 +153,7 @@ static void prepare_send_buffer(CommBuf &send_buffer, } #ifdef BOND_CONSTRAINT if (data_parts & GHOSTTRANS_RATTLE) { - archiver << part.rattle; + archiver << part.rattle_params(); } #endif if (data_parts & GHOSTTRANS_BONDS) { @@ -172,7 +172,7 @@ static void prepare_ghost_cell(ParticleList *cell, int size) { /* Mark particles as ghosts */ for (auto &p : *cell) { - p.l.ghost = true; + p.set_ghost(true); } } @@ -214,7 +214,7 @@ static void put_recv_buffer(CommBuf &recv_buffer, } #ifdef BOND_CONSTRAINT if (data_parts & GHOSTTRANS_RATTLE) { - archiver >> part.rattle; + archiver >> part.rattle_params(); } #endif } @@ -248,7 +248,7 @@ add_rattle_correction_from_recv_buffer(CommBuf &recv_buffer, for (Particle &part : *part_list) { ParticleRattle pr; archiver >> pr; - part.rattle += pr; + part.rattle_params() += pr; } } } @@ -276,7 +276,7 @@ static void cell_cell_transfer(const GhostCommunication &ghost_comm, auto *dst_list = ghost_comm.part_lists[pl + offset]; if (data_parts & GHOSTTRANS_PARTNUM) { - prepare_ghost_cell(dst_list, src_list->size()); + prepare_ghost_cell(dst_list, static_cast(src_list->size())); } else { auto const &src_part = *src_list; auto &dst_part = *dst_list; @@ -295,7 +295,7 @@ static void cell_cell_transfer(const GhostCommunication &ghost_comm, if (data_parts & GHOSTTRANS_POSITION) { /* ok, this is not nice, but perhaps fast */ part2.r = part1.r; - part2.r.p += ghost_comm.shift; + part2.pos() += ghost_comm.shift; } if (data_parts & GHOSTTRANS_MOMENTUM) { part2.m = part1.m; @@ -304,7 +304,7 @@ static void cell_cell_transfer(const GhostCommunication &ghost_comm, part2.f += part1.f; #ifdef BOND_CONSTRAINT if (data_parts & GHOSTTRANS_RATTLE) - part2.rattle += part1.rattle; + part2.rattle_params() += part1.rattle_params(); #endif } } @@ -390,21 +390,23 @@ void ghost_communicator(const GhostCommunicator &gcr, unsigned int data_parts) { // (which consists of already serialized data). 
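      // boost::mpi's array overloads of send()/recv() take the element count
      // as a plain int, hence the casts on the buffer sizes below. Minimal
      // sketch of the pattern (hypothetical contiguous buffer `buf`):
      //   comm.send(node, REQ_GHOST_SEND, buf.data(),
      //             static_cast<int>(buf.size()));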
switch (comm_type) { case GHOST_RECV: - comm.recv(node, REQ_GHOST_SEND, recv_buffer.data(), recv_buffer.size()); + comm.recv(node, REQ_GHOST_SEND, recv_buffer.data(), + static_cast(recv_buffer.size())); comm.recv(node, REQ_GHOST_SEND, recv_buffer.bonds()); break; case GHOST_SEND: - comm.send(node, REQ_GHOST_SEND, send_buffer.data(), send_buffer.size()); + comm.send(node, REQ_GHOST_SEND, send_buffer.data(), + static_cast(send_buffer.size())); comm.send(node, REQ_GHOST_SEND, send_buffer.bonds()); break; case GHOST_BCST: if (node == comm.rank()) { - boost::mpi::broadcast(comm, send_buffer.data(), send_buffer.size(), - node); + boost::mpi::broadcast(comm, send_buffer.data(), + static_cast(send_buffer.size()), node); boost::mpi::broadcast(comm, send_buffer.bonds(), node); } else { - boost::mpi::broadcast(comm, recv_buffer.data(), recv_buffer.size(), - node); + boost::mpi::broadcast(comm, recv_buffer.data(), + static_cast(recv_buffer.size()), node); boost::mpi::broadcast(comm, recv_buffer.bonds(), node); } break; diff --git a/src/core/ghosts.hpp b/src/core/ghosts.hpp index de0df7e25fc..dcfabff2e42 100644 --- a/src/core/ghosts.hpp +++ b/src/core/ghosts.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _GHOSTS_H -#define _GHOSTS_H +#ifndef CORE_GHOSTS_HPP +#define CORE_GHOSTS_HPP /** \file * Ghost particles and particle exchange. * @@ -136,10 +136,6 @@ enum : unsigned { GHOSTTRANS_BONDS = 128u }; -/** \name Data Types */ -/************************************************************/ -/**@{*/ - struct GhostCommunication { /** Communication type. */ int type; @@ -167,17 +163,9 @@ struct GhostCommunicator { std::vector communications; }; -/**@}*/ - -/** \name Exported Functions */ -/************************************************************/ -/**@{*/ - /** * @brief Do a ghost communication with caller specified data parts. */ void ghost_communicator(const GhostCommunicator &gcr, unsigned int data_parts); -/**@}*/ - #endif diff --git a/src/core/grid.cpp b/src/core/grid.cpp index 74e60db1a82..06dda104654 100644 --- a/src/core/grid.cpp +++ b/src/core/grid.cpp @@ -38,6 +38,7 @@ #include #include +#include BoxGeometry box_geo; LocalBox local_geo; @@ -79,14 +80,15 @@ LocalBox regular_decomposition(const BoxGeometry &box, } Utils::Array boundaries; - for (int dir = 0; dir < 3; dir++) { + for (std::size_t dir = 0; dir < 3; dir++) { /* left boundary ? */ boundaries[2 * dir] = (node_pos[dir] == 0); /* right boundary ? */ boundaries[2 * dir + 1] = -(node_pos[dir] == node_grid_par[dir] - 1); } - return {my_left, local_length, boundaries}; + return {my_left, local_length, boundaries, + CellStructureType::CELL_STRUCTURE_REGULAR}; } void grid_changed_box_l(const BoxGeometry &box) { diff --git a/src/core/grid.hpp b/src/core/grid.hpp index 26afae452e4..af5beaf2e71 100644 --- a/src/core/grid.hpp +++ b/src/core/grid.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _GRID_H -#define _GRID_H +#ifndef CORE_GRID_HPP +#define CORE_GRID_HPP /** @file * Domain decomposition for parallel computing. * diff --git a/src/core/grid_based_algorithms/electrokinetics.hpp b/src/core/grid_based_algorithms/electrokinetics.hpp index c7799cbfcd9..c76d6edca8a 100644 --- a/src/core/grid_based_algorithms/electrokinetics.hpp +++ b/src/core/grid_based_algorithms/electrokinetics.hpp @@ -17,8 +17,8 @@ * along with this program. If not, see . 
*/ -#ifndef _ELECTROKINETICS_HPP -#define _ELECTROKINETICS_HPP +#ifndef CORE_GRID_BASED_ALGORITHMS_ELECTROKINETICS_HPP +#define CORE_GRID_BASED_ALGORITHMS_ELECTROKINETICS_HPP #include "config.hpp" #include "grid_based_algorithms/lb_boundaries.hpp" @@ -189,4 +189,4 @@ void ek_init_species_density_wallcharge(float *wallcharge_species_density, #endif /* CUDA */ -#endif /* ELECTROKINETICS_H */ +#endif /* CORE_GRID_BASED_ALGORITHMS_ELECTROKINETICS_HPP */ diff --git a/src/core/grid_based_algorithms/electrokinetics_cuda.cu b/src/core/grid_based_algorithms/electrokinetics_cuda.cu index 9640489e2b5..20515f2fda5 100644 --- a/src/core/grid_based_algorithms/electrokinetics_cuda.cu +++ b/src/core/grid_based_algorithms/electrokinetics_cuda.cu @@ -160,19 +160,19 @@ EKParameters ek_parameters = { nullptr, #endif // rho - {nullptr}, + {}, // species_index {-1}, // density - {0.0}, + {}, // D - {0.0}, + {}, // d - {0.0}, + {}, // valency - {0.0}, + {}, // ext_force_density - {0.0}, + {}, // node_is_catalyst nullptr, }; @@ -194,8 +194,6 @@ extern LB_node_force_density_gpu node_f, node_f_buf; extern LB_nodes_gpu *current_nodes; extern EKParameters *lb_ek_parameters; -LB_rho_v_gpu *ek_lb_device_values; - __device__ cufftReal ek_getNode(unsigned x, unsigned y, unsigned z) { auto *field = reinterpret_cast(ek_parameters_gpu->charge_potential); @@ -1093,9 +1091,8 @@ __device__ void ek_diffusion_migration_lbforce_nodecentered_stencil( } __device__ void -ek_add_advection_to_flux(unsigned int index, unsigned int *neighborindex, - unsigned int *coord, unsigned int species_index, - LB_node_force_density_gpu node_f, LB_nodes_gpu lb_node, +ek_add_advection_to_flux(unsigned int index, unsigned int *coord, + unsigned int species_index, LB_nodes_gpu lb_node, LB_parameters_gpu *ek_lbparameters_gpu) { float dx[3]; unsigned int di[3]; @@ -1376,7 +1373,6 @@ __global__ void ek_calculate_quantities(unsigned int species_index, LB_nodes_gpu lb_node, LB_node_force_density_gpu node_f, LB_parameters_gpu *ek_lbparameters_gpu, - LB_rho_v_gpu *d_v, uint64_t philox_counter) { unsigned int index = ek_getThreadIndex(); @@ -1479,8 +1475,8 @@ __global__ void ek_calculate_quantities(unsigned int species_index, /* advective contribution to flux */ if (ek_parameters_gpu->advection) - ek_add_advection_to_flux(index, neighborindex, coord, species_index, - node_f, lb_node, ek_lbparameters_gpu); + ek_add_advection_to_flux(index, coord, species_index, lb_node, + ek_lbparameters_gpu); /* fluctuation contribution to flux */ if (ek_parameters_gpu->fluctuations) @@ -1643,9 +1639,7 @@ __global__ void ek_propagate_densities(unsigned int species_index) { } } -__global__ void ek_apply_boundaries(unsigned int species_index, - LB_nodes_gpu lbnode, - LB_node_force_density_gpu node_f) { +__global__ void ek_apply_boundaries(LB_nodes_gpu lbnode) { unsigned int index = ek_getThreadIndex(); unsigned int neighborindex[22]; @@ -2140,7 +2134,7 @@ void ek_integrate() { for (unsigned i = 0; i < ek_parameters.number_of_species; i++) { KERNELCALL(ek_clear_fluxes, dim_grid, threads_per_block); KERNELCALL(ek_calculate_quantities, dim_grid, threads_per_block, i, - *current_nodes, node_f, ek_lbparameters_gpu, ek_lb_device_values, + *current_nodes, node_f, ek_lbparameters_gpu, philox_counter.value()); KERNELCALL(ek_propagate_densities, dim_grid, threads_per_block, i); @@ -2256,9 +2250,9 @@ int ek_init() { ek_parameters.bulk_viscosity * time_step / Utils::sqr(lbpar_gpu.agrid); lbpar_gpu.external_force_density = - ek_parameters.lb_ext_force_density[0] != 0 || - 
ek_parameters.lb_ext_force_density[1] != 0 || - ek_parameters.lb_ext_force_density[2] != 0; + ek_parameters.lb_ext_force_density[0] != 0.f || + ek_parameters.lb_ext_force_density[1] != 0.f || + ek_parameters.lb_ext_force_density[2] != 0.f; lbpar_gpu.ext_force_density = Utils::Vector3f(ek_parameters.lb_ext_force_density) * Utils::sqr(lbpar_gpu.agrid * time_step); @@ -2304,8 +2298,6 @@ int ek_init() { cuda_safe_mem(cudaMalloc((void **)&charge_gpu, sizeof(float))); - lb_get_device_values_pointer(&ek_lb_device_values); - if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "ERROR: Failed to allocate\n"); return 1; @@ -2600,14 +2592,12 @@ int ek_node_get_flux(int species, int x, int y, int z, double *flux) { KERNELCALL(ek_clear_fluxes, dim_grid, threads_per_block); KERNELCALL(ek_calculate_quantities, dim_grid, threads_per_block, static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f, ek_lbparameters_gpu, ek_lb_device_values, + *current_nodes, node_f, ek_lbparameters_gpu, philox_counter.value()); reset_LB_force_densities_GPU(false); #ifdef EK_BOUNDARIES - KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, - static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f); + KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, *current_nodes); #endif cuda_safe_mem(cudaMemcpy(fluxes.data(), ek_parameters.j, @@ -2823,14 +2813,12 @@ int ek_print_vtk_flux(int species, char *filename) { KERNELCALL(ek_clear_fluxes, dim_grid, threads_per_block); KERNELCALL(ek_calculate_quantities, dim_grid, threads_per_block, static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f, ek_lbparameters_gpu, ek_lb_device_values, + *current_nodes, node_f, ek_lbparameters_gpu, philox_counter.value()); reset_LB_force_densities_GPU(false); #ifdef EK_BOUNDARIES - KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, - static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f); + KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, *current_nodes); #endif cuda_safe_mem(cudaMemcpy(fluxes.data(), ek_parameters.j, @@ -3052,13 +3040,12 @@ int ek_print_vtk_flux_fluc(int species, char *filename) { KERNELCALL(ek_clear_fluxes, dim_grid, threads_per_block); KERNELCALL(ek_calculate_quantities, dim_grid, threads_per_block, static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f, ek_lbparameters_gpu, ek_lb_device_values, + *current_nodes, node_f, ek_lbparameters_gpu, philox_counter.value()); reset_LB_force_densities_GPU(false); #ifdef EK_BOUNDARIES - KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, - ek_parameters.species_index[species], *current_nodes, node_f); + KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, *current_nodes); #endif cuda_safe_mem(cudaMemcpy(fluxes.data(), ek_parameters.j_fluc, @@ -3282,14 +3269,12 @@ int ek_print_vtk_flux_link(int species, char *filename) { KERNELCALL(ek_clear_fluxes, dim_grid, threads_per_block); KERNELCALL(ek_calculate_quantities, dim_grid, threads_per_block, static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f, ek_lbparameters_gpu, ek_lb_device_values, + *current_nodes, node_f, ek_lbparameters_gpu, philox_counter.value()); reset_LB_force_densities_GPU(false); #ifdef EK_BOUNDARIES - KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, - static_cast(ek_parameters.species_index[species]), - *current_nodes, node_f); + KERNELCALL(ek_apply_boundaries, dim_grid, threads_per_block, *current_nodes); #endif 
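Annotation on the 0 → 0.f literal changes above: comparing a float member against a double literal promotes the float operand to double before the comparison, which double-promotion/conversion diagnostics flag even though the result is usually the same. A standalone illustration (not code from this patch):

  // With a double literal the float operand is promoted to double before the
  // comparison; with a float literal the comparison stays in single precision.
  bool is_zero_mixed(float x) { return x != 0.0; } // float promoted to double
  bool is_zero_float(float x) { return x != 0.f; } // pure float comparison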
cuda_safe_mem(cudaMemcpy(fluxes.data(), ek_parameters.j, @@ -3631,13 +3616,11 @@ void ek_print_lbpar() { printf(" unsigned int dim_y = %d;\n", lbpar_gpu.dim[1]); printf(" unsigned int dim_z = %d;\n", lbpar_gpu.dim[2]); printf(" unsigned int number_of_nodes = %d;\n", lbpar_gpu.number_of_nodes); - printf(" int calc_val = %d;\n", lbpar_gpu.calc_val); - printf(" int external_force_density = %d;\n", - lbpar_gpu.external_force_density); + printf(" bool external_force_density = %d;\n", + static_cast(lbpar_gpu.external_force_density)); printf(" float ext_force_density[3] = {%f, %f, %f};\n", lbpar_gpu.ext_force_density[0], lbpar_gpu.ext_force_density[1], lbpar_gpu.ext_force_density[2]); - printf(" unsigned int reinit = %d;\n", lbpar_gpu.reinit); printf("}\n"); } diff --git a/src/core/grid_based_algorithms/fd-electrostatics.cuh b/src/core/grid_based_algorithms/fd-electrostatics.cuh index 567e37b5ab9..241ba7588a6 100644 --- a/src/core/grid_based_algorithms/fd-electrostatics.cuh +++ b/src/core/grid_based_algorithms/fd-electrostatics.cuh @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _FD_ELECTROSTATICS_HPP -#define _FD_ELECTROSTATICS_HPP +#ifndef CORE_GRID_BASED_ALGORITHMS_FD_ELECTROSTATICS_HPP +#define CORE_GRID_BASED_ALGORITHMS_FD_ELECTROSTATICS_HPP #include diff --git a/src/core/grid_based_algorithms/halo.hpp b/src/core/grid_based_algorithms/halo.hpp index ee61d40619a..cb2bc726c58 100644 --- a/src/core/grid_based_algorithms/halo.hpp +++ b/src/core/grid_based_algorithms/halo.hpp @@ -18,6 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef CORE_GRID_BASED_ALGORITHMS_HALO_HPP +#define CORE_GRID_BASED_ALGORITHMS_HALO_HPP /** \file * * Halo scheme for parallelization of lattice algorithms. @@ -25,9 +27,6 @@ * */ -#ifndef _HALO_HPP -#define _HALO_HPP - #include "grid_based_algorithms/lattice.hpp" #include @@ -103,7 +102,7 @@ struct HaloInfo { * parallelization scheme */ class HaloCommunicator { public: - HaloCommunicator(int num) : num(num){}; + HaloCommunicator(int num) : num(num) {} int num; /**< number of halo communications in the scheme */ @@ -133,4 +132,4 @@ void release_halo_communication(HaloCommunicator &hc); */ void halo_communication(const HaloCommunicator &hc, char *base); -#endif /* HALO_H */ +#endif /* CORE_GRID_BASED_ALGORITHMS_HALO_HPP */ diff --git a/src/core/grid_based_algorithms/lb.cpp b/src/core/grid_based_algorithms/lb.cpp index c623d9c96bd..7cac4ed2a0d 100644 --- a/src/core/grid_based_algorithms/lb.cpp +++ b/src/core/grid_based_algorithms/lb.cpp @@ -29,7 +29,7 @@ #include "grid_based_algorithms/lb.hpp" -#include "cells.hpp" +#include "CellStructureType.hpp" #include "communication.hpp" #include "errorhandling.hpp" #include "grid.hpp" @@ -47,12 +47,15 @@ #include #include +#include #include #include +#include #include -#include #include +#include + #include #include #include @@ -190,15 +193,15 @@ HaloCommunicator update_halo_comm = HaloCommunicator(0); /** * @brief Initialize fluid nodes. 
- * @param[out] fields Vector containing the fluid nodes + * @param[out] lb_fields Vector containing the fluid nodes * @param[in] lb_parameters Parameters for the LB * @param[in] lb_lattice Lattice instance */ -void lb_initialize_fields(std::vector &fields, +void lb_initialize_fields(std::vector &lb_fields, LB_Parameters const &lb_parameters, Lattice const &lb_lattice) { - fields.resize(lb_lattice.halo_grid_volume); - for (auto &field : fields) { + lb_fields.resize(lb_lattice.halo_grid_volume); + for (auto &field : lb_fields) { field.force_density = lb_parameters.ext_force_density; #ifdef LB_BOUNDARIES field.boundary = false; @@ -273,7 +276,7 @@ void lb_reinit_fluid(std::vector &lb_fields, Lattice const &lb_lattice, LB_Parameters const &lb_parameters) { lb_set_equilibrium_populations(lb_lattice, lb_parameters); - lb_initialize_fields(lbfields, lb_parameters, lb_lattice); + lb_initialize_fields(lb_fields, lb_parameters, lb_lattice); } void lb_reinit_parameters(LB_Parameters &lb_parameters) { @@ -611,8 +614,9 @@ void lb_sanity_checks(const LB_Parameters &lb_parameters) { if (lb_parameters.viscosity <= 0.0) { runtimeErrorMsg() << "Lattice Boltzmann fluid viscosity not set"; } - if (cell_structure.decomposition_type() != CELL_STRUCTURE_DOMDEC) { - runtimeErrorMsg() << "LB requires domain-decomposition cellsystem"; + if (local_geo.cell_structure_type() != + CellStructureType::CELL_STRUCTURE_REGULAR) { + runtimeErrorMsg() << "LB requires regular-decomposition cellsystem"; } } @@ -714,16 +718,9 @@ std::array lb_calc_n_from_m(const std::array &modes) { Utils::Vector19d lb_get_population_from_density_momentum_density_stress( double density, Utils::Vector3d const &momentum_density, Utils::Vector6d const &stress) { - std::array modes{density, - momentum_density[0], - momentum_density[1], - momentum_density[2], - stress[0], - stress[1], - stress[2], - stress[3], - stress[4], - stress[5]}; + std::array modes{ + {density, momentum_density[0], momentum_density[1], momentum_density[2], + stress[0], stress[1], stress[2], stress[3], stress[4], stress[5]}}; return Utils::Vector19d{lb_calc_n_from_m(modes)}; } @@ -845,18 +842,18 @@ std::array lb_apply_forces(const std::array &modes, Utils::Vector3d{modes[1], modes[2], modes[3]} + T{0.5} * f / density; auto const C = std::array{ - (1. + lb_parameters.gamma_shear) * u[0] * f[0] + - 1. / 3. * (lb_parameters.gamma_bulk - lb_parameters.gamma_shear) * - (u * f), - 1. / 2. * (1. + lb_parameters.gamma_shear) * (u[0] * f[1] + u[1] * f[0]), - (1. + lb_parameters.gamma_shear) * u[1] * f[1] + - 1. / 3. * (lb_parameters.gamma_bulk - lb_parameters.gamma_shear) * - (u * f), - 1. / 2. * (1. + lb_parameters.gamma_shear) * (u[0] * f[2] + u[2] * f[0]), - 1. / 2. * (1. + lb_parameters.gamma_shear) * (u[1] * f[2] + u[2] * f[1]), - (1. + lb_parameters.gamma_shear) * u[2] * f[2] + - 1. / 3. * (lb_parameters.gamma_bulk - lb_parameters.gamma_shear) * - (u * f)}; + {(1. + lb_parameters.gamma_shear) * u[0] * f[0] + + 1. / 3. * (lb_parameters.gamma_bulk - lb_parameters.gamma_shear) * + (u * f), + 1. / 2. * (1. + lb_parameters.gamma_shear) * (u[0] * f[1] + u[1] * f[0]), + (1. + lb_parameters.gamma_shear) * u[1] * f[1] + + 1. / 3. * (lb_parameters.gamma_bulk - lb_parameters.gamma_shear) * + (u * f), + 1. / 2. * (1. + lb_parameters.gamma_shear) * (u[0] * f[2] + u[2] * f[0]), + 1. / 2. * (1. + lb_parameters.gamma_shear) * (u[1] * f[2] + u[2] * f[1]), + (1. + lb_parameters.gamma_shear) * u[2] * f[2] + + 1. / 3. 
* (lb_parameters.gamma_bulk - lb_parameters.gamma_shear) * + (u * f)}}; return {{modes[0], /* update momentum modes */ @@ -974,12 +971,9 @@ void lb_integrate() { #endif } -/***********************************************************************/ -/** \name Coupling part */ -/***********************************************************************/ -/**@{*/ #ifdef ADDITIONAL_CHECKS -template int compare_buffers(T const &buff_a, T const &buff_b) { +int compare_buffers(std::array const &buff_a, + std::array const &buff_b) { if (buff_a != buff_b) { runtimeErrorMsg() << "Halo buffers are not identical"; return ES_ERROR; @@ -987,6 +981,18 @@ template int compare_buffers(T const &buff_a, T const &buff_b) { return ES_OK; } +void log_buffer_diff(std::ostream &out, int dir, Lattice::index_t index, int x, + int y, int z) { + out << "buffers differ in dir=" << dir << " at node index=" << index; + if (x != -1) + out << " x=" << x; + if (y != -1) + out << " y=" << y; + if (z != -1) + out << " z=" << z; + out << "\n"; +} + /** Check consistency of the halo regions. * Test whether the halo regions have been exchanged correctly. */ @@ -1023,8 +1029,7 @@ void lb_check_halo_regions(const LB_Fluid &lb_fluid, for (i = 0; i < D3Q19::n_vel; i++) r_buffer[i] = lb_fluid[i][index]; if (compare_buffers(s_buffer, r_buffer)) { - std::cerr << "buffers differ in dir=" << 0 << " at index=" << index - << " y=" << y << " z=" << z << "\n"; + log_buffer_diff(std::cerr, 0, index, -1, y, z); } } @@ -1047,8 +1052,7 @@ void lb_check_halo_regions(const LB_Fluid &lb_fluid, for (i = 0; i < D3Q19::n_vel; i++) r_buffer[i] = lb_fluid[i][index]; if (compare_buffers(s_buffer, r_buffer)) { - std::cerr << "buffers differ in dir=0 at index=" << index - << " y=" << y << " z=" << z << "\n"; + log_buffer_diff(std::cerr, 0, index, -1, y, z); } } } @@ -1078,8 +1082,7 @@ void lb_check_halo_regions(const LB_Fluid &lb_fluid, for (i = 0; i < D3Q19::n_vel; i++) r_buffer[i] = lb_fluid[i][index]; if (compare_buffers(s_buffer, r_buffer)) { - std::cerr << "buffers differ in dir=1 at index=" << index - << " x=" << x << " z=" << z << "\n"; + log_buffer_diff(std::cerr, 1, index, x, -1, z); } } } @@ -1103,8 +1106,7 @@ void lb_check_halo_regions(const LB_Fluid &lb_fluid, for (i = 0; i < D3Q19::n_vel; i++) r_buffer[i] = lb_fluid[i][index]; if (compare_buffers(s_buffer, r_buffer)) { - std::cerr << "buffers differ in dir=1 at index=" << index - << " x=" << x << " z=" << z << "\n"; + log_buffer_diff(std::cerr, 1, index, x, -1, z); } } } @@ -1134,9 +1136,7 @@ void lb_check_halo_regions(const LB_Fluid &lb_fluid, for (i = 0; i < D3Q19::n_vel; i++) r_buffer[i] = lb_fluid[i][index]; if (compare_buffers(s_buffer, r_buffer)) { - std::cerr << "buffers differ in dir=2 at index=" << index - << " x=" << x << " y=" << y << " z=" << lb_lattice.grid[2] - << "\n"; + log_buffer_diff(std::cerr, 2, index, x, y, lb_lattice.grid[2]); } } } @@ -1162,8 +1162,7 @@ void lb_check_halo_regions(const LB_Fluid &lb_fluid, for (i = 0; i < D3Q19::n_vel; i++) r_buffer[i] = lb_fluid[i][index]; if (compare_buffers(s_buffer, r_buffer)) { - std::cerr << "buffers differ in dir=2 at index=" << index - << " x=" << x << " y=" << y << "\n"; + log_buffer_diff(std::cerr, 2, index, x, y, -1); } } } @@ -1336,22 +1335,20 @@ void lb_calc_fluid_momentum(double *result, const LB_Parameters &lb_parameters, } momentum *= lb_parameters.agrid / lb_parameters.tau; - MPI_Reduce(momentum.data(), result, 3, MPI_DOUBLE, MPI_SUM, 0, comm_cart); + boost::mpi::reduce(comm_cart, momentum.data(), 3, result, std::plus<>(), 
0); } void lb_collect_boundary_forces(double *result) { #ifdef LB_BOUNDARIES - int n_lb_boundaries = LBBoundaries::lbboundaries.size(); - std::vector boundary_forces(3 * n_lb_boundaries); - int i = 0; + auto const lbb_data_len = 3 * LBBoundaries::lbboundaries.size(); + std::vector boundary_forces(lbb_data_len); + std::size_t i = 0; for (auto it = LBBoundaries::lbboundaries.begin(); it != LBBoundaries::lbboundaries.end(); ++it, i++) - for (int j = 0; j < 3; j++) + for (std::size_t j = 0; j < 3; j++) boundary_forces[3 * i + j] = (**it).force()[j]; - MPI_Reduce(boundary_forces.data(), result, 3 * n_lb_boundaries, MPI_DOUBLE, - MPI_SUM, 0, comm_cart); + boost::mpi::reduce(comm_cart, boundary_forces.data(), + static_cast(lbb_data_len), result, std::plus<>(), 0); #endif } - -/**@}*/ diff --git a/src/core/grid_based_algorithms/lb.hpp b/src/core/grid_based_algorithms/lb.hpp index 06a98248b88..be1c91fb812 100644 --- a/src/core/grid_based_algorithms/lb.hpp +++ b/src/core/grid_based_algorithms/lb.hpp @@ -18,6 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef SRC_CORE_GRID_BASED_ALGORITHMS_LB_HPP +#define SRC_CORE_GRID_BASED_ALGORITHMS_LB_HPP /** \file * * %Lattice Boltzmann algorithm for hydrodynamic degrees of freedom. @@ -34,9 +36,6 @@ * Implementation in lb.cpp. */ -#ifndef LB_H -#define LB_H - #include "config.hpp" #include "grid_based_algorithms/lattice.hpp" #include "grid_based_algorithms/lb-d3q19.hpp" @@ -53,6 +52,7 @@ #include #include #include +#include #include /** Counter for the RNG */ @@ -171,11 +171,6 @@ template auto get(const LB_Fluid_Ref &lb_fluid) { /** Hydrodynamic fields of the fluid */ extern std::vector lbfields; -/************************************************************/ -/** \name Exported Functions */ -/************************************************************/ -/**@{*/ - /** Integrate the lattice-Boltzmann system for one time step. * This function performs the collision step and the streaming step. 
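Annotation on the MPI_Reduce → boost::mpi::reduce changes above: the Boost call keeps the same root-only reduction semantics but deduces the MPI datatype from the C++ element type and accepts a standard function object as the reduction operation. A hedged sketch of the call shape (array length and root rank are illustrative):

  #include <boost/mpi/collectives.hpp>
  #include <boost/mpi/communicator.hpp>
  #include <array>
  #include <functional>

  // Sum a fixed-size array of doubles across all ranks; only the root rank 0
  // receives the result, mirroring MPI_Reduce(..., MPI_SUM, 0, comm).
  void reduce_momentum(boost::mpi::communicator const &comm,
                       std::array<double, 3> const &local, double *result) {
    boost::mpi::reduce(comm, local.data(), 3, result, std::plus<>(), 0);
  }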
* If external force densities are present, they are applied prior to the @@ -196,9 +191,6 @@ void lb_set_population_from_density_momentum_density_stress( Lattice::index_t index, double density, Utils::Vector3d const &momentum_density, Utils::Vector6d const &stress); -#ifdef VIRTUAL_SITES_INERTIALESS_TRACERS -#endif - double lb_calc_density(std::array const &modes, const LB_Parameters &lb_parameters); Utils::Vector3d lb_calc_momentum_density(std::array const &modes, @@ -267,6 +259,9 @@ void lb_initialize_fields(std::vector &fields, Lattice const &lb_lattice); void lb_on_param_change(LBParam param); -/**@}*/ +#ifdef ADDITIONAL_CHECKS +void log_buffer_diff(std::ostream &out, int dir, Lattice::index_t index, int x, + int y, int z); +#endif // ADDITIONAL_CHECKS -#endif /* _LB_H */ +#endif // SRC_CORE_GRID_BASED_ALGORITHMS_LB_HPP diff --git a/src/core/grid_based_algorithms/lb_boundaries.cpp b/src/core/grid_based_algorithms/lb_boundaries.cpp index 8ffffeaf944..f140d06a95f 100644 --- a/src/core/grid_based_algorithms/lb_boundaries.cpp +++ b/src/core/grid_based_algorithms/lb_boundaries.cpp @@ -231,8 +231,6 @@ void lb_init_boundaries() { #endif /* defined (CUDA) && defined (LB_BOUNDARIES_GPU) */ } else if (lattice_switch == ActiveLB::CPU) { #if defined(LB_BOUNDARIES) - auto const lblattice = lb_lbfluid_get_lattice(); - boost::for_each(lbfields, [](auto &f) { f.boundary = 0; }); auto const node_pos = calc_node_pos(comm_cart); diff --git a/src/core/grid_based_algorithms/lb_collective_interface.cpp b/src/core/grid_based_algorithms/lb_collective_interface.cpp index fade1ffc743..4ea9ddcd5ef 100644 --- a/src/core/grid_based_algorithms/lb_collective_interface.cpp +++ b/src/core/grid_based_algorithms/lb_collective_interface.cpp @@ -91,10 +91,10 @@ mpi_lb_get_interpolated_density(Utils::Vector3d const &pos) { REGISTER_CALLBACK_ONE_RANK(mpi_lb_get_interpolated_density) auto mpi_lb_get_density(Utils::Vector3i const &index) { - return detail::lb_calc_fluid_kernel( - index, [&](auto const &modes, auto const &force_density) { - return lb_calc_density(modes, lbpar); - }); + return detail::lb_calc_fluid_kernel(index, + [&](auto const &modes, auto const &) { + return lb_calc_density(modes, lbpar); + }); } REGISTER_CALLBACK_ONE_RANK(mpi_lb_get_density) diff --git a/src/core/grid_based_algorithms/lb_interface.cpp b/src/core/grid_based_algorithms/lb_interface.cpp index 5d71d4683ec..a9b0414cb60 100644 --- a/src/core/grid_based_algorithms/lb_interface.cpp +++ b/src/core/grid_based_algorithms/lb_interface.cpp @@ -45,6 +45,8 @@ ActiveLB lattice_switch = ActiveLB::NONE; +ActiveLB lb_lbfluid_get_lattice_switch() { return lattice_switch; } + struct NoLBActive : public std::exception { const char *what() const noexcept override { return "LB not activated"; } }; @@ -352,12 +354,9 @@ void lb_lbfluid_set_ext_force_density(const Utils::Vector3d &force_density) { lbpar_gpu.ext_force_density[0] = static_cast(force_density[0]); lbpar_gpu.ext_force_density[1] = static_cast(force_density[1]); lbpar_gpu.ext_force_density[2] = static_cast(force_density[2]); - if (force_density[0] != 0 || force_density[1] != 0 || - force_density[2] != 0) { - lbpar_gpu.external_force_density = 1; - } else { - lbpar_gpu.external_force_density = 0; - } + lbpar_gpu.external_force_density = force_density[0] != 0. || + force_density[1] != 0. 
|| + force_density[2] != 0.; lb_reinit_extern_nodeforce_GPU(&lbpar_gpu); #endif // CUDA @@ -408,7 +407,7 @@ void check_tau_time_step_consistency(double tau, double time_step) { auto const factor = tau / time_step; if (fabs(round(factor) - factor) / factor > eps) throw std::invalid_argument("LB tau (" + std::to_string(tau) + - ") must be integer multiple of " + ") must be an integer multiple of the " "MD time_step (" + std::to_string(time_step) + "). Factor is " + std::to_string(factor)); @@ -582,9 +581,9 @@ void lb_lbfluid_print_vtk_velocity(const std::string &filename, #ifdef CUDA host_values.resize(lbpar_gpu.number_of_nodes); lb_get_values_GPU(host_values.data()); - auto const box_l_x = lb_lbfluid_get_shape()[0]; - vtk_writer("lbfluid_gpu", [box_l_x](Utils::Vector3i const &pos) { - auto const j = box_l_x * box_l_x * pos[2] + box_l_x * pos[1] + pos[0]; + auto const box_l = lb_lbfluid_get_shape(); + vtk_writer("lbfluid_gpu", [&box_l](Utils::Vector3i const &pos) { + auto const j = box_l[0] * box_l[1] * pos[2] + box_l[0] * pos[1] + pos[0]; return Utils::Vector3d{host_values[j].v}; }); #endif // CUDA @@ -1100,10 +1099,6 @@ void lb_lbnode_set_pop(const Utils::Vector3i &ind, } } -const Lattice &lb_lbfluid_get_lattice() { return lblattice; } - -ActiveLB lb_lbfluid_get_lattice_switch() { return lattice_switch; } - static void mpi_lb_lbfluid_calc_fluid_momentum_local() { lb_calc_fluid_momentum(nullptr, lbpar, lbfields, lblattice); } diff --git a/src/core/grid_based_algorithms/lb_interface.hpp b/src/core/grid_based_algorithms/lb_interface.hpp index f146a69b29f..cbb6eb3c2ce 100644 --- a/src/core/grid_based_algorithms/lb_interface.hpp +++ b/src/core/grid_based_algorithms/lb_interface.hpp @@ -66,11 +66,6 @@ uint64_t lb_lbfluid_get_rng_state(); */ void lb_lbfluid_set_rng_state(uint64_t counter); -/** - * @brief Return the instance of the Lattice within the LB method. - */ -const Lattice &lb_lbfluid_get_lattice(); - /** * @brief Get the global variable @ref lattice_switch. */ diff --git a/src/core/grid_based_algorithms/lb_particle_coupling.cpp b/src/core/grid_based_algorithms/lb_particle_coupling.cpp index 630a756713b..5e084386467 100644 --- a/src/core/grid_based_algorithms/lb_particle_coupling.cpp +++ b/src/core/grid_based_algorithms/lb_particle_coupling.cpp @@ -148,24 +148,24 @@ Utils::Vector3d lb_viscous_coupling(Particle const &p, /* calculate fluid velocity at particle's position this is done by linear interpolation (eq. (11) @cite ahlrichs99a) */ auto const interpolated_u = - lb_lbinterpolation_get_interpolated_velocity(p.r.p) * + lb_lbinterpolation_get_interpolated_velocity(p.pos()) * lb_lbfluid_get_lattice_speed(); Utils::Vector3d v_drift = interpolated_u; #ifdef ENGINE - if (p.p.swim.swimming) { - v_drift += p.p.swim.v_swim * p.r.calc_director(); + if (p.swimming().swimming) { + v_drift += p.swimming().v_swim * p.r.calc_director(); } #endif #ifdef LB_ELECTROHYDRODYNAMICS - v_drift += p.p.mu_E; + v_drift += p.mu_E(); #endif /* calculate viscous force (eq. (9) @cite ahlrichs99a) */ - auto const force = -lb_lbcoupling_get_gamma() * (p.m.v - v_drift) + f_random; + auto const force = -lb_lbcoupling_get_gamma() * (p.v() - v_drift) + f_random; - add_md_force(p.r.p, force, time_step); + add_md_force(p.pos(), force, time_step); return force; } @@ -198,15 +198,12 @@ bool in_box(Vector const &pos, Box const &box) { * @brief Check if a position is within the local box + halo. 
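Annotation on the VTK writer change above: the old index box_l_x * box_l_x * pos[2] silently assumed a cubic lattice, whereas the row-major linear index of a node in an nx × ny × nz grid needs nx * ny as the z stride. A small sketch of the corrected indexing (names are illustrative):

  #include <cstddef>

  // Row-major linear index of node (x, y, z) in a grid of shape (nx, ny, nz).
  // nx * nx is only correct when nx == ny, i.e. for a cubic box.
  inline std::size_t node_index(std::size_t x, std::size_t y, std::size_t z,
                                std::size_t nx, std::size_t ny) {
    return nx * ny * z + nx * y + x;
  }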
* * @param pos Position to check - * @param local_box Geometry to check * @param halo Halo * * @return True iff the point is inside of the box up to halo. */ -template -bool in_local_domain(Vector const &pos, LocalBox const &local_box, - T const &halo = {}) { - auto const halo_vec = Vector::broadcast(halo); +inline bool in_local_domain(Vector3d const &pos, double halo = 0.) { + auto const halo_vec = Vector3d::broadcast(halo); return in_box( pos, {local_geo.my_left() - halo_vec, local_geo.my_right() + halo_vec}); @@ -223,23 +220,23 @@ bool in_local_domain(Vector const &pos, LocalBox const &local_box, bool in_local_halo(Vector3d const &pos) { auto const halo = 0.5 * lb_lbfluid_get_agrid(); - return in_local_domain(pos, local_geo, halo); + return in_local_domain(pos, halo); } #ifdef ENGINE void add_swimmer_force(Particle const &p, double time_step) { - if (p.p.swim.swimming) { + if (p.swimming().swimming) { // calculate source position const double direction = - double(p.p.swim.push_pull) * p.p.swim.dipole_length; + double(p.swimming().push_pull) * p.swimming().dipole_length; auto const director = p.r.calc_director(); - auto const source_position = p.r.p + direction * director; + auto const source_position = p.pos() + direction * director; if (not in_local_halo(source_position)) { return; } - add_md_force(source_position, p.p.swim.f_swim * director, time_step); + add_md_force(source_position, p.swimming().f_swim * director, time_step); } } #endif @@ -293,20 +290,20 @@ void lb_lbcoupling_calc_particle_lattice_ia(bool couple_virtual, }; auto couple_particle = [&](Particle &p) -> void { - if (p.p.is_virtual and !couple_virtual) + if (p.is_virtual() and !couple_virtual) return; /* Particle is in our LB volume, so this node * is responsible to adding its force */ - if (in_local_domain(p.r.p, local_geo)) { + if (in_local_domain(p.pos())) { auto const force = lb_viscous_coupling( p, noise_amplitude * f_random(p.identity()), time_step); /* add force to the particle */ - p.f.f += force; + p.force() += force; /* Particle is not in our domain, but adds to the force * density in our domain, only calculate contribution to * the LB force density. */ - } else if (in_local_halo(p.r.p)) { + } else if (in_local_halo(p.pos())) { lb_viscous_coupling(p, noise_amplitude * f_random(p.identity()), time_step); } diff --git a/src/core/grid_based_algorithms/lbgpu.cpp b/src/core/grid_based_algorithms/lbgpu.cpp index 3d01fecb276..02eeb840e0a 100644 --- a/src/core/grid_based_algorithms/lbgpu.cpp +++ b/src/core/grid_based_algorithms/lbgpu.cpp @@ -43,49 +43,41 @@ LB_parameters_gpu lbpar_gpu = { // rho - 0.0, + 0.f, // mu - 0.0, + 0.f, // viscosity - 0.0, + 0.f, // gamma_shear - 0.0, + 0.f, // gamma_bulk - 0.0, + 0.f, // gamma_odd - 0.0, + 0.f, // gamma_even - 0.0, + 0.f, // is_TRT false, // bulk_viscosity - -1.0, + -1.f, // agrid - -1.0, + -1.f, // tau - -1.0, - // dim_x; - 0, - // dim_y; - 0, - // dim_z; - 0, + -1.f, + // dim + {{{0u, 0u, 0u}}}, // number_of_nodes - 0, + 0u, #ifdef LB_BOUNDARIES_GPU // number_of_boundnodes - 0, + 0u, #endif - // calc_val - 1, - // external_force - 0, - // ext_force - {0.0, 0.0, 0.0}, - // reinit - 0, + // external_force_density + false, + // ext_force_density + {{{0.f, 0.f, 0.f}}}, // Thermal energy - 0.0}; + 0.f}; /** this is the array that stores the hydrodynamic fields for the output */ std::vector host_values(0); @@ -94,12 +86,10 @@ bool ek_initialized = false; /** (Re-)initialize the fluid according to the given value of rho. 
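Annotation on the simplified in_local_domain above: it now takes just the position and a halo width and tests against the local box enlarged by the halo in every direction. Roughly, under the assumption of a half-open interval test (generic box type, not the Utils::Vector3d / LocalBox machinery of the patch):

  #include <array>

  // Return true if pos lies inside [lo - halo, hi + halo) in every dimension,
  // i.e. inside the local domain enlarged by a halo layer.
  inline bool in_expanded_box(std::array<double, 3> const &pos,
                              std::array<double, 3> const &lo,
                              std::array<double, 3> const &hi, double halo) {
    for (int i = 0; i < 3; ++i) {
      if (pos[i] < lo[i] - halo or pos[i] >= hi[i] + halo)
        return false;
    }
    return true;
  }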
*/ void lb_reinit_fluid_gpu() { - lb_reinit_parameters_gpu(); - if (lbpar_gpu.number_of_nodes != 0) { + if (lbpar_gpu.number_of_nodes != 0u) { lb_reinit_GPU(&lbpar_gpu); lb_reinit_extern_nodeforce_GPU(&lbpar_gpu); - lbpar_gpu.reinit = 1; } } @@ -107,15 +97,15 @@ void lb_reinit_fluid_gpu() { * See @cite dunweg07a and @cite dhumieres09a. */ void lb_reinit_parameters_gpu() { - lbpar_gpu.mu = 0.0; + lbpar_gpu.mu = 0.f; - if (lbpar_gpu.viscosity > 0.0 && lbpar_gpu.agrid > 0.0 && - lbpar_gpu.tau > 0.0) { + if (lbpar_gpu.viscosity > 0.f && lbpar_gpu.agrid > 0.f && + lbpar_gpu.tau > 0.f) { /* Eq. (80) @cite dunweg07a. */ lbpar_gpu.gamma_shear = 1.f - 2.f / (6.f * lbpar_gpu.viscosity + 1.f); } - if (lbpar_gpu.bulk_viscosity > 0.0) { + if (lbpar_gpu.bulk_viscosity > 0.f) { /* Eq. (81) @cite dunweg07a. */ lbpar_gpu.gamma_bulk = 1.f - 2.f / (9.f * lbpar_gpu.bulk_viscosity + 1.f); } @@ -133,10 +123,10 @@ void lb_reinit_parameters_gpu() { lbpar_gpu.gamma_bulk = lbpar_gpu.gamma_shear; lbpar_gpu.gamma_even = lbpar_gpu.gamma_shear; lbpar_gpu.gamma_odd = - -(7.0f * lbpar_gpu.gamma_even + 1.0f) / (lbpar_gpu.gamma_even + 7.0f); + -(7.f * lbpar_gpu.gamma_even + 1.f) / (lbpar_gpu.gamma_even + 7.f); } - if (lbpar_gpu.kT > 0.0) { /* fluctuating hydrodynamics ? */ + if (lbpar_gpu.kT > 0.f) { /* fluctuating hydrodynamics ? */ /* Eq. (51) @cite dunweg07a.*/ /* Note that the modes are not normalized as in the paper here! */ @@ -170,16 +160,16 @@ void lb_init_gpu() { void lb_GPU_sanity_checks() { if (this_node == 0) { - if (lbpar_gpu.agrid < 0.0) { + if (lbpar_gpu.agrid < 0.f) { runtimeErrorMsg() << "Lattice Boltzmann agrid not set"; } - if (lbpar_gpu.tau < 0.0) { + if (lbpar_gpu.tau < 0.f) { runtimeErrorMsg() << "Lattice Boltzmann time step not set"; } - if (lbpar_gpu.rho < 0.0) { + if (lbpar_gpu.rho < 0.f) { runtimeErrorMsg() << "Lattice Boltzmann fluid density not set"; } - if (lbpar_gpu.viscosity < 0.0) { + if (lbpar_gpu.viscosity < 0.f) { runtimeErrorMsg() << "Lattice Boltzmann fluid viscosity not set"; } } @@ -206,10 +196,9 @@ void lb_set_agrid_gpu(double agrid) { return std::abs(d) < std::numeric_limits::epsilon(); }); if (not commensurable) { - runtimeErrorMsg() << "Lattice spacing agrid= " << agrid + runtimeErrorMsg() << "Lattice spacing agrid=" << agrid << " is incompatible with one of the box dimensions: " - << box_geo.length()[0] << " " << box_geo.length()[1] - << " " << box_geo.length()[2]; + << "[" << box_geo.length() << "]"; } lbpar_gpu.number_of_nodes = std::accumulate(lbpar_gpu.dim.begin(), lbpar_gpu.dim.end(), 1u, diff --git a/src/core/grid_based_algorithms/lbgpu.hpp b/src/core/grid_based_algorithms/lbgpu.hpp index af0c8ebb6af..aabcdace3d7 100644 --- a/src/core/grid_based_algorithms/lbgpu.hpp +++ b/src/core/grid_based_algorithms/lbgpu.hpp @@ -55,8 +55,9 @@ struct LB_parameters_gpu { float gamma_shear; /** relaxation rate of bulk modes */ float gamma_bulk; - /** */ + /** relaxation rate of odd modes */ float gamma_odd; + /** relaxation rate of even modes */ float gamma_even; /** flag determining whether gamma_shear, gamma_odd, and gamma_even are * calculated from gamma_shear in such a way to yield a TRT LB with minimized @@ -80,14 +81,11 @@ struct LB_parameters_gpu { #ifdef LB_BOUNDARIES_GPU unsigned int number_of_boundnodes; #endif - /** to calculate and print out physical values */ - int calc_val; - int external_force_density; + bool external_force_density; Utils::Array ext_force_density; - unsigned int reinit; // Thermal energy float kT; }; @@ -147,7 +145,6 @@ struct LB_rho_v_gpu { }; void 
lb_GPU_sanity_checks(); -void lb_get_device_values_pointer(LB_rho_v_gpu **pointer_address); void lb_get_boundary_force_pointer(float **pointer_address); void lb_get_para_pointer(LB_parameters_gpu **pointer_address); @@ -222,7 +219,7 @@ uint64_t lb_coupling_get_rng_state_gpu(); void lb_coupling_set_rng_state_gpu(uint64_t counter); /** Calculate the node index from its coordinates */ -inline unsigned int calculate_node_index(LB_parameters_gpu const &lbpar, +inline unsigned int calculate_node_index(LB_parameters_gpu const &lbpar_gpu, Utils::Vector3i const &coord) { return static_cast( Utils::get_linear_index(coord, Utils::Vector3i(lbpar_gpu.dim))); diff --git a/src/core/grid_based_algorithms/lbgpu_cuda.cu b/src/core/grid_based_algorithms/lbgpu_cuda.cu index 3d80fd71a52..33457b5acdf 100644 --- a/src/core/grid_based_algorithms/lbgpu_cuda.cu +++ b/src/core/grid_based_algorithms/lbgpu_cuda.cu @@ -1221,7 +1221,6 @@ velocity_interpolation(LB_nodes_gpu n_a, float const *particle_position, * @param[in,out] particle_force Particle force * @param[in] part_index Particle id / thread id * @param[out] node_index Node index around (8) particle - * @param[in] d_v Local device values * @param[in] flag_cs Determine if we are at the centre (0, * typical) or at the source (1, swimmer only) * @param[in] philox_counter Philox counter @@ -1235,8 +1234,8 @@ __device__ void calc_viscous_force( LB_nodes_gpu n_a, Utils::Array &delta, CUDA_particle_data *particle_data, float *particle_force, unsigned int part_index, float *delta_j, - Utils::Array &node_index, LB_rho_v_gpu *d_v, - bool flag_cs, uint64_t philox_counter, float friction, float time_step) { + Utils::Array &node_index, bool flag_cs, + uint64_t philox_counter, float friction, float time_step) { auto const flag_cs_float = static_cast(flag_cs); // Zero out workspace #pragma unroll @@ -1264,7 +1263,7 @@ __device__ void calc_viscous_force( #ifdef ENGINE // First calculate interpolated velocity for dipole source, - // such that we don't overwrite mode, d_v, etc. for the rest of the function + // such that we don't overwrite mode, etc. 
for the rest of the function float direction = float(particle_data[part_index].swim.push_pull) * particle_data[part_index].swim.dipole_length; // Extrapolate position by dipole length if we are at the centre of the @@ -1863,7 +1862,6 @@ __global__ void integrate(LB_nodes_gpu n_a, LB_nodes_gpu n_b, LB_rho_v_gpu *d_v, * @param[in,out] particle_data Particle position and velocity * @param[in,out] particle_force Particle force * @param[out] node_f Local node force - * @param[in] d_v Local device values * @param[in] couple_virtual If true, virtual particles are also coupled * @param[in] philox_counter Philox counter * @param[in] friction Friction constant for the particle coupling @@ -1872,11 +1870,12 @@ __global__ void integrate(LB_nodes_gpu n_a, LB_nodes_gpu n_b, LB_rho_v_gpu *d_v, * interpolation */ template -__global__ void calc_fluid_particle_ia( - LB_nodes_gpu n_a, Utils::Span particle_data, - float *particle_force, LB_node_force_density_gpu node_f, LB_rho_v_gpu *d_v, - bool couple_virtual, uint64_t philox_counter, float friction, - float time_step) { +__global__ void +calc_fluid_particle_ia(LB_nodes_gpu n_a, + Utils::Span particle_data, + float *particle_force, LB_node_force_density_gpu node_f, + bool couple_virtual, uint64_t philox_counter, + float friction, float time_step) { unsigned int part_index = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; @@ -1892,15 +1891,14 @@ __global__ void calc_fluid_particle_ia( * force that acts back onto the fluid. */ calc_viscous_force( n_a, delta, particle_data.data(), particle_force, part_index, delta_j, - node_index, d_v, false, philox_counter, friction, time_step); + node_index, false, philox_counter, friction, time_step); calc_node_force(delta, delta_j, node_index, node_f); #ifdef ENGINE if (particle_data[part_index].swim.swimming) { calc_viscous_force( n_a, delta, particle_data.data(), particle_force, part_index, - delta_j, node_index, d_v, true, philox_counter, friction, - time_step); + delta_j, node_index, true, philox_counter, friction, time_step); calc_node_force(delta, delta_j, node_index, node_f); } #endif @@ -1982,8 +1980,8 @@ __global__ void lb_print_node(unsigned int single_nodeindex, } } -__global__ void momentum(LB_nodes_gpu n_a, LB_rho_v_gpu *d_v, - LB_node_force_density_gpu node_f, float *sum) { +__global__ void momentum(LB_nodes_gpu n_a, LB_node_force_density_gpu node_f, + float *sum) { unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; @@ -2041,10 +2039,6 @@ void lb_get_boundary_force_pointer(float **pointer_address) { #endif } -void lb_get_device_values_pointer(LB_rho_v_gpu **pointer_address) { - *pointer_address = device_rho_v; -} - /** Initialization for the lb gpu fluid called from host * @param lbpar_gpu Pointer to parameters to setup the lb field */ @@ -2227,20 +2221,19 @@ void lb_calc_particle_lattice_ia_gpu(bool couple_virtual, double friction, dim3 dim_grid = calculate_dim_grid( static_cast(device_particles.size()), 4, threads_per_block); - if (lbpar_gpu.kT > 0.0) { + if (lbpar_gpu.kT > 0.f) { assert(rng_counter_coupling_gpu); KERNELCALL(calc_fluid_particle_ia, dim_grid, threads_per_block, *current_nodes, device_particles, - gpu_get_particle_force_pointer(), node_f, device_rho_v, - couple_virtual, rng_counter_coupling_gpu->value(), - static_cast(friction), static_cast(time_step)); + gpu_get_particle_force_pointer(), node_f, couple_virtual, + rng_counter_coupling_gpu->value(), static_cast(friction), + static_cast(time_step)); } else { // We use a 
dummy value for the RNG counter if no temperature is set. KERNELCALL(calc_fluid_particle_ia, dim_grid, threads_per_block, *current_nodes, device_particles, - gpu_get_particle_force_pointer(), node_f, device_rho_v, - couple_virtual, 0, static_cast(friction), - static_cast(time_step)); + gpu_get_particle_force_pointer(), node_f, couple_virtual, 0, + static_cast(friction), static_cast(time_step)); } } template void lb_calc_particle_lattice_ia_gpu<8>(bool couple_virtual, @@ -2343,16 +2336,17 @@ void lb_calc_fluid_momentum_GPU(double *host_mom) { dim3 dim_grid = calculate_dim_grid(lbpar_gpu.number_of_nodes, 4, threads_per_block); - KERNELCALL(momentum, dim_grid, threads_per_block, *current_nodes, - device_rho_v, node_f, tot_momentum); + KERNELCALL(momentum, dim_grid, threads_per_block, *current_nodes, node_f, + tot_momentum); cuda_safe_mem(cudaMemcpy(host_momentum, tot_momentum, 3 * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(tot_momentum); - host_mom[0] = (double)(host_momentum[0] * lbpar_gpu.agrid / lbpar_gpu.tau); - host_mom[1] = (double)(host_momentum[1] * lbpar_gpu.agrid / lbpar_gpu.tau); - host_mom[2] = (double)(host_momentum[2] * lbpar_gpu.agrid / lbpar_gpu.tau); + auto const lattice_speed = lbpar_gpu.agrid / lbpar_gpu.tau; + host_mom[0] = static_cast(host_momentum[0] * lattice_speed); + host_mom[1] = static_cast(host_momentum[1] * lattice_speed); + host_mom[2] = static_cast(host_momentum[2] * lattice_speed); } /** Setup and call kernel for getting macroscopic fluid values of all nodes @@ -2512,9 +2506,9 @@ struct lb_lbfluid_mass_of_particle { #ifdef MASS return particle.mass; #else - return 1.; + return 1.f; #endif - }; + } }; /** Set the populations of a specific node on the GPU diff --git a/src/core/immersed_boundary/ImmersedBoundaries.cpp b/src/core/immersed_boundary/ImmersedBoundaries.cpp index a36b543bf73..81a52000d0c 100644 --- a/src/core/immersed_boundary/ImmersedBoundaries.cpp +++ b/src/core/immersed_boundary/ImmersedBoundaries.cpp @@ -80,9 +80,8 @@ void ImmersedBoundaries::init_volume_conservation(CellStructure &cs) { } static const IBMVolCons *vol_cons_parameters(Particle const &p1) { - auto it = boost::find_if(p1.bonds(), [](auto const &bond) { - return boost::get(bonded_ia_params.at(bond.bond_id()).get()) != - nullptr; + auto const it = boost::find_if(p1.bonds(), [](auto const &bond) -> bool { + return boost::get(bonded_ia_params.at(bond.bond_id()).get()); }); return (it != p1.bonds().end()) @@ -104,7 +103,6 @@ void ImmersedBoundaries::calc_volumes(CellStructure &cs) { // Loop over all particles on local node cs.bond_loop([&tempVol](Particle &p1, int bond_id, Utils::Span partners) { - auto const &iaparams = *bonded_ia_params.at(bond_id); auto vol_cons_params = vol_cons_parameters(p1); if (vol_cons_params && @@ -117,9 +115,10 @@ void ImmersedBoundaries::calc_volumes(CellStructure &cs) { // Unfold position of first node. // This is to get a continuous trajectory with no jumps when box // boundaries are crossed. - auto const x1 = unfolded_position(p1.r.p, p1.l.i, box_geo.length()); - auto const x2 = x1 + box_geo.get_mi_vector(p2.r.p, x1); - auto const x3 = x1 + box_geo.get_mi_vector(p3.r.p, x1); + auto const x1 = + unfolded_position(p1.pos(), p1.image_box(), box_geo.length()); + auto const x2 = x1 + box_geo.get_mi_vector(p2.pos(), x1); + auto const x3 = x1 + box_geo.get_mi_vector(p3.pos(), x1); // Volume of this tetrahedron // See @cite zhang01b @@ -176,12 +175,13 @@ void ImmersedBoundaries::calc_volume_force(CellStructure &cs) { // Unfold position of first node. 
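Annotation on the lattice_speed factor introduced above: the momentum accumulated by the GPU kernel is in lattice units, and the factor agrid / tau converts each component back to MD units; the patch now computes that factor once. A tiny sketch of the conversion (assuming the same unit convention):

  #include <array>

  // Convert a momentum given in LB lattice units to MD units by multiplying
  // with the lattice speed agrid / tau (grid spacing over LB time step).
  inline std::array<double, 3> to_md_units(std::array<float, 3> const &mom_lb,
                                           float agrid, float tau) {
    auto const lattice_speed = static_cast<double>(agrid) / tau;
    return {mom_lb[0] * lattice_speed, mom_lb[1] * lattice_speed,
            mom_lb[2] * lattice_speed};
  }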
// This is to get a continuous trajectory with no jumps when box // boundaries are crossed. - auto const x1 = unfolded_position(p1.r.p, p1.l.i, box_geo.length()); + auto const x1 = + unfolded_position(p1.pos(), p1.image_box(), box_geo.length()); // Unfolding seems to work only for the first particle of a triel // so get the others from relative vectors considering PBC - auto const a12 = box_geo.get_mi_vector(p2.r.p, x1); - auto const a13 = box_geo.get_mi_vector(p3.r.p, x1); + auto const a12 = box_geo.get_mi_vector(p2.pos(), x1); + auto const a13 = box_geo.get_mi_vector(p3.pos(), x1); // Now we have the true and good coordinates // This is eq. (9) in @cite dupin08a. diff --git a/src/core/immersed_boundary/ibm_common.cpp b/src/core/immersed_boundary/ibm_common.cpp index f66d2e7f3d3..e9e7e41422f 100644 --- a/src/core/immersed_boundary/ibm_common.cpp +++ b/src/core/immersed_boundary/ibm_common.cpp @@ -34,7 +34,7 @@ Utils::Vector3d get_ibm_particle_position(int pid) { auto *p = cell_structure.get_local_particle(pid); boost::optional opt_part{boost::none}; - if (p and not p->l.ghost) { + if (p and not p->is_ghost()) { opt_part = *p; } opt_part = boost::mpi::all_reduce(comm_cart, opt_part, @@ -46,6 +46,6 @@ Utils::Vector3d get_ibm_particle_position(int pid) { return item; }); if (opt_part) - return opt_part.get().r.p; + return opt_part.get().pos(); throw std::runtime_error("Immersed Boundary: Particle not found"); } \ No newline at end of file diff --git a/src/core/immersed_boundary/ibm_tribend.cpp b/src/core/immersed_boundary/ibm_tribend.cpp index da1cf6b0282..b776b6dc903 100644 --- a/src/core/immersed_boundary/ibm_tribend.cpp +++ b/src/core/immersed_boundary/ibm_tribend.cpp @@ -34,9 +34,9 @@ IBMTribend::calc_forces(Particle const &p1, Particle const &p2, Particle const &p3, Particle const &p4) const { // Get vectors making up the two triangles - auto const dx1 = box_geo.get_mi_vector(p1.r.p, p3.r.p); - auto const dx2 = box_geo.get_mi_vector(p2.r.p, p3.r.p); - auto const dx3 = box_geo.get_mi_vector(p4.r.p, p3.r.p); + auto const dx1 = box_geo.get_mi_vector(p1.pos(), p3.pos()); + auto const dx2 = box_geo.get_mi_vector(p2.pos(), p3.pos()); + auto const dx3 = box_geo.get_mi_vector(p4.pos(), p3.pos()); // Get normals on triangle; pointing outwards by definition of indices // sequence @@ -75,15 +75,19 @@ IBMTribend::calc_forces(Particle const &p1, Particle const &p2, // Force on particles: eq. 
(C.28-C.31) auto const force1 = - Pre * (vector_product(box_geo.get_mi_vector(p2.r.p, p3.r.p), v1) / Ai + - vector_product(box_geo.get_mi_vector(p3.r.p, p4.r.p), v2) / Aj); + Pre * + (vector_product(box_geo.get_mi_vector(p2.pos(), p3.pos()), v1) / Ai + + vector_product(box_geo.get_mi_vector(p3.pos(), p4.pos()), v2) / Aj); auto const force2 = - Pre * (vector_product(box_geo.get_mi_vector(p3.r.p, p1.r.p), v1) / Ai); + Pre * + (vector_product(box_geo.get_mi_vector(p3.pos(), p1.pos()), v1) / Ai); auto const force3 = - Pre * (vector_product(box_geo.get_mi_vector(p1.r.p, p2.r.p), v1) / Ai + - vector_product(box_geo.get_mi_vector(p4.r.p, p1.r.p), v2) / Aj); + Pre * + (vector_product(box_geo.get_mi_vector(p1.pos(), p2.pos()), v1) / Ai + + vector_product(box_geo.get_mi_vector(p4.pos(), p1.pos()), v2) / Aj); auto const force4 = - Pre * (vector_product(box_geo.get_mi_vector(p1.r.p, p3.r.p), v2) / Aj); + Pre * + (vector_product(box_geo.get_mi_vector(p1.pos(), p3.pos()), v2) / Aj); return std::make_tuple(force1, force2, force3, force4); } diff --git a/src/core/immersed_boundary/ibm_triel.cpp b/src/core/immersed_boundary/ibm_triel.cpp index 123ffa587ef..691a03c9d71 100644 --- a/src/core/immersed_boundary/ibm_triel.cpp +++ b/src/core/immersed_boundary/ibm_triel.cpp @@ -79,11 +79,11 @@ IBMTriel::calc_forces(Particle const &p1, Particle const &p2, // Calculate the current shape of the triangle (l,lp,cos(phi),sin(phi)); // l = length between 1 and 3 // get_mi_vector is an ESPResSo function which considers PBC - auto const vec2 = box_geo.get_mi_vector(p3.r.p, p1.r.p); + auto const vec2 = box_geo.get_mi_vector(p3.pos(), p1.pos()); auto const l = vec2.norm(); // lp = length between 1 and 2 - auto const vec1 = box_geo.get_mi_vector(p2.r.p, p1.r.p); + auto const vec1 = box_geo.get_mi_vector(p2.pos(), p1.pos()); auto const lp = vec1.norm(); // Check for sanity diff --git a/src/core/integrate.cpp b/src/core/integrate.cpp index 21515ad12d1..581f03bd7bb 100644 --- a/src/core/integrate.cpp +++ b/src/core/integrate.cpp @@ -196,7 +196,7 @@ void integrator_step_2(ParticleRange &particles, double kT) { break; #ifdef STOKESIAN_DYNAMICS case INTEG_METHOD_SD: - stokesian_dynamics_step_2(particles); + // Nothing break; #endif // STOKESIAN_DYNAMICS default: diff --git a/src/core/integrators/steepest_descent.hpp b/src/core/integrators/steepest_descent.hpp index ce04ced64de..fe20f7cbee8 100644 --- a/src/core/integrators/steepest_descent.hpp +++ b/src/core/integrators/steepest_descent.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . 
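Annotation on the get_mi_vector calls used throughout these bonded-interaction kernels: they return the minimum-image difference vector under periodic boundary conditions. For reference, a generic sketch of the folding in a fully periodic box (simplified; the real BoxGeometry implementation also handles non-periodic directions):

  #include <array>
  #include <cmath>

  // Minimum-image difference a - b in a fully periodic box of edge lengths
  // box: each component is folded into the interval [-box[i]/2, box[i]/2].
  inline std::array<double, 3> mi_vector(std::array<double, 3> const &a,
                                         std::array<double, 3> const &b,
                                         std::array<double, 3> const &box) {
    std::array<double, 3> d{};
    for (int i = 0; i < 3; ++i) {
      d[i] = a[i] - b[i];
      d[i] -= box[i] * std::round(d[i] / box[i]);
    }
    return d;
  }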
*/ -#ifndef __STEEPEST_DESCENT_HPP -#define __STEEPEST_DESCENT_HPP +#ifndef CORE_INTEGRATORS_STEEPEST_DESCENT_HPP +#define CORE_INTEGRATORS_STEEPEST_DESCENT_HPP #include "ParticleRange.hpp" @@ -64,4 +64,4 @@ void steepest_descent_init(double f_max, double gamma, double max_displacement); */ bool steepest_descent_step(const ParticleRange &particles); -#endif /* __STEEPEST_DESCENT_HPP */ +#endif /* CORE_INTEGRATORS_STEEPEST_DESCENT_HPP */ diff --git a/src/core/integrators/stokesian_dynamics_inline.hpp b/src/core/integrators/stokesian_dynamics_inline.hpp index 238da65e55b..c7b5e0ec1e0 100644 --- a/src/core/integrators/stokesian_dynamics_inline.hpp +++ b/src/core/integrators/stokesian_dynamics_inline.hpp @@ -54,7 +54,5 @@ inline void stokesian_dynamics_step_1(const ParticleRange &particles, increment_sim_time(time_step); } -inline void stokesian_dynamics_step_2(const ParticleRange &particles) {} - #endif // STOKESIAN_DYNAMICS #endif diff --git a/src/core/integrators/velocity_verlet_npt.cpp b/src/core/integrators/velocity_verlet_npt.cpp index 1a758952378..c56279b9031 100644 --- a/src/core/integrators/velocity_verlet_npt.cpp +++ b/src/core/integrators/velocity_verlet_npt.cpp @@ -45,7 +45,6 @@ void velocity_verlet_npt_propagate_vel_final(const ParticleRange &particles, double time_step) { - extern IsotropicNptThermostat npt_iso; nptiso.p_vel = {}; for (auto &p : particles) { @@ -69,7 +68,6 @@ void velocity_verlet_npt_propagate_vel_final(const ParticleRange &particles, /** Scale and communicate instantaneous NpT pressure */ void velocity_verlet_npt_finalize_p_inst(double time_step) { - extern IsotropicNptThermostat npt_iso; /* finalize derivation of p_inst */ nptiso.p_inst = 0.0; for (int i = 0; i < 3; i++) { @@ -105,7 +103,6 @@ void velocity_verlet_npt_propagate_pos(const ParticleRange &particles, pow(nptiso.volume, 2.0 / nptiso.dimension); nptiso.volume += nptiso.inv_piston * nptiso.p_diff * 0.5 * time_step; if (nptiso.volume < 0.0) { - runtimeErrorMsg() << "your choice of piston= " << nptiso.piston << ", dt= " << time_step << ", p_diff= " << nptiso.p_diff @@ -163,7 +160,6 @@ void velocity_verlet_npt_propagate_pos(const ParticleRange &particles, void velocity_verlet_npt_propagate_vel(const ParticleRange &particles, double time_step) { - extern IsotropicNptThermostat npt_iso; nptiso.p_vel = {}; for (auto &p : particles) { diff --git a/src/core/interactions.hpp b/src/core/interactions.hpp index e891b74af97..9e5b79c0a59 100644 --- a/src/core/interactions.hpp +++ b/src/core/interactions.hpp @@ -21,8 +21,8 @@ /** \file * This file contains the asynchronous MPI communication for interactions. */ -#ifndef _INTERACTIONS_HPP -#define _INTERACTIONS_HPP +#ifndef CORE_INTERACTIONS_HPP +#define CORE_INTERACTIONS_HPP /** Calculate the maximal cutoff of all interactions. */ double maximal_cutoff(); diff --git a/src/core/io/mpiio/mpiio.cpp b/src/core/io/mpiio/mpiio.cpp index 35d79e3751c..11ebdea93b8 100644 --- a/src/core/io/mpiio/mpiio.cpp +++ b/src/core/io/mpiio/mpiio.cpp @@ -18,33 +18,33 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -/** \file +/** @file * * Concerning the file layouts. * - Scalar arrays are written like this: - * rank0 --- rank1 --- rank2 ... + * rank0 --- rank1 --- rank2 ... * where each rank dumps its scalars in the ordering of the particles. * - Vector arrays are written in the rank ordering like scalar arrays. 
- * The ordering of the vector data is: v[0] v[1] v[2], so the data + * The ordering of the vector data is: v[0] v[1] v[2], so the data * looks like this: - * v1[0] v1[1] v1[2] v2[0] v2[1] v2[2] v3[0] ... + * v1[0] v1[1] v1[2] v2[0] v2[1] v2[2] v3[0] ... * * To be able to determine the rank boundaries (a multiple of - * nlocalparts), the file 1.pref is written, which dumps the Exscan - * results of nlocalparts, i.e. the prefixes in scalar arrays: + * @c nlocalparts), the file 1.pref is written, which dumps the partial + * sum of @c nlocalparts, i.e. the prefixes in scalar arrays: * - 1.prefs looks like this: - * 0 nlocalpats_rank0 nlocalparts_rank0+nlocalparts_rank1 ... + * 0 nlocalpats_rank0 nlocalparts_rank0+nlocalparts_rank1 ... * * Bonds are dumped as two arrays, namely 1.bond which stores the * bonding partners of the particles and 1.boff which stores the * iteration indices for each particle. - * - 1.boff is a scalar array of size (nlocalpart + 1) per rank. - * - The last element (at index nlocalpart) of 1.boff's subpart - * [rank * (nlocalpart + 1) : (rank + 1) * (nlocalpart + 1)] - * determines the number of bonds for processor "rank". + * - 1.boff is a scalar array of size (nlocalpart + 1) per rank. + * - The last element (at index @c nlocalpart) of 1.boff's subpart + * [rank * (nlocalpart + 1) : (rank + 1) * (nlocalpart + 1)] + * determines the number of bonds for processor @c rank. * - In this subarray one can find the bonding partners of particle - * id[i]. The iteration indices for local part of 1.bonds are: - * subarray[i] : subarray[i+1] + * id[i]. The iteration indices for local part of 1.bonds are: + * subarray[i] : subarray[i+1] * - Take a look at the bond input code. It's easy to understand. */ @@ -66,152 +66,208 @@ #include #include +#include #include #include #include #include +#include #include #include +#include #include #include namespace Mpiio { -/** Dumps arr of size len starting from prefix pref of type T using - * MPI_T as MPI datatype. Beware, that T and MPI_T have to match! +/** + * @brief Fatal error handler. + * On 1 MPI rank the error is recoverable and an exception is thrown. + * On more than 1 MPI rank the error is not recoverable. + * @param msg Custom error message + * @param fn File path + * @param extra Extra context + */ +static bool fatal_error(char const *msg, std::string const &fn = "", + std::string const &extra = "") { + std::stringstream what; + what << "MPI-IO Error: " << msg; + if (not fn.empty()) { + what << " \"" << fn << "\""; + } + if (not extra.empty()) { + what << " :" << extra; + } + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + if (size == 1) { + throw std::runtime_error(what.str()); + } + fprintf(stderr, "%s\n", what.str().c_str()); + errexit(); + return false; +} + +/** + * @brief Fatal error handler that closes an open file and queries the + * message associated with an MPI error code. + * On 1 MPI rank the error is recoverable and an exception is thrown. + * On more than 1 MPI rank the error is not recoverable. 
+ * @param msg Custom error message + * @param fn File path + * @param fp File handle + * @param errnum MPI error code + */ +static bool fatal_error(char const *msg, std::string const &fn, MPI_File *fp, + int errnum) { + // get MPI error message + char buf[MPI_MAX_ERROR_STRING]; + int buf_len; + MPI_Error_string(errnum, buf, &buf_len); + buf[buf_len] = '\0'; + // close file handle + if (fp) { + MPI_File_close(fp); + } + return fatal_error(msg, fn, buf); +} + +/** + * @brief Dump data @p arr of size @p len starting from prefix @p pref + * of type @p T using @p MPI_T as MPI datatype. Beware, that @p T and + * @p MPI_T have to match! * - * \param fn The file name to dump to. Must not exist already - * \param arr The array to dump - * \param len The number of elements to dump - * \param pref The prefix for this process - * \param MPI_T The MPI_Datatype corresponding to the template parameter T. + * @param fn The file name to write to (must not already exist!) + * @param arr The array to dump + * @param len The number of elements to dump + * @param pref The prefix for this process + * @param MPI_T The MPI datatype corresponding to the template parameter @p T */ template -static void mpiio_dump_array(const std::string &fn, T *arr, std::size_t len, - std::size_t pref, MPI_Datatype MPI_T) { +static void mpiio_dump_array(const std::string &fn, T const *arr, + std::size_t len, std::size_t pref, + MPI_Datatype MPI_T) { MPI_File f; int ret; - ret = MPI_File_open(MPI_COMM_WORLD, const_cast(fn.c_str()), // MPI_MODE_EXCL: Prohibit overwriting MPI_MODE_WRONLY | MPI_MODE_CREATE | MPI_MODE_EXCL, MPI_INFO_NULL, &f); if (ret) { - char buf[MPI_MAX_ERROR_STRING]; - int buf_len; - MPI_Error_string(ret, buf, &buf_len); - buf[buf_len] = '\0'; - fprintf(stderr, "MPI-IO Error: Could not open file \"%s\": %s\n", - fn.c_str(), buf); - errexit(); + fatal_error("Could not open file", fn, &f, ret); } - ret = MPI_File_set_view(f, pref * sizeof(T), MPI_T, MPI_T, - const_cast("native"), MPI_INFO_NULL); - ret |= MPI_File_write_all(f, arr, len, MPI_T, MPI_STATUS_IGNORE); + auto const offset = + static_cast(pref) * static_cast(sizeof(T)); + ret = MPI_File_set_view(f, offset, MPI_T, MPI_T, const_cast("native"), + MPI_INFO_NULL); + ret |= MPI_File_write_all(f, arr, static_cast(len), MPI_T, + MPI_STATUS_IGNORE); + static_cast(ret and fatal_error("Could not write file", fn, &f, ret)); MPI_File_close(&f); - if (ret) { - fprintf(stderr, "MPI-IO Error: Could not write file \"%s\".\n", fn.c_str()); - errexit(); - } } -/** Dumps some generic infos like the dumped fields and info to process - * the bond information offline (without ESPResSo). To be called by the - * head node only. +/** + * @brief Calculate the file offset on the local node. + * @param n_items Number of items on the local node. + * @return The number of items on all nodes with lower rank. + */ +static unsigned long mpi_calculate_file_offset(unsigned long n_items) { + unsigned long offset = 0ul; + MPI_Exscan(&n_items, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + return offset; +} + +/** + * @brief Dump the fields and bond information. + * To be called by the head node only. 
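Annotation on mpi_calculate_file_offset above: it relies on MPI_Exscan, the exclusive prefix sum, so rank r receives the sum of n_items over ranks 0..r-1; the output on rank 0 is not significant, which is why the offset variable is pre-initialised to zero. A worked sketch of the same call shape (communicator parameter added for illustration):

  #include <mpi.h>

  // Exclusive prefix sum of per-rank counts: rank r obtains the total number
  // of items held by ranks 0..r-1. The result on rank 0 is not significant,
  // so the output variable must be pre-initialised to 0.
  unsigned long file_offset(unsigned long n_items, MPI_Comm comm) {
    unsigned long offset = 0ul;
    MPI_Exscan(&n_items, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
    return offset; // e.g. local counts {4, 2, 5} yield offsets {0, 4, 6}
  }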
* - * \param fn The filename to write to - * \param fields The dumped fields + * @param fn The filename to write to + * @param fields The dumped fields */ static void dump_info(const std::string &fn, unsigned fields) { + // MPI-IO requires consecutive bond ids + auto const nbonds = bonded_ia_params.size(); + assert(static_cast(bonded_ia_params.get_next_key()) == nbonds); + FILE *f = fopen(fn.c_str(), "wb"); if (!f) { - fprintf(stderr, "MPI-IO Error: Could not open %s for writing.\n", - fn.c_str()); - errexit(); + fatal_error("Could not open file", fn); } static std::vector npartners; - int success = (fwrite(&fields, sizeof(fields), 1, f) == 1); + bool success = (fwrite(&fields, sizeof(fields), 1u, f) == 1); // Pack the necessary information of bonded_ia_params: // The number of partners. This is needed to interpret the bond IntList. - if (bonded_ia_params.size() > npartners.size()) - npartners.resize(bonded_ia_params.size()); + if (nbonds > npartners.size()) + npartners.resize(nbonds); - for (int i = 0; i < bonded_ia_params.size(); ++i) { - npartners[i] = number_of_partners(*bonded_ia_params.at(i)); + auto npartners_it = npartners.begin(); + for (int i = 0; i < bonded_ia_params.get_next_key(); ++i, ++npartners_it) { + *npartners_it = number_of_partners(*bonded_ia_params.at(i)); } - auto ia_params_size = static_cast(bonded_ia_params.size()); + success = success && (fwrite(&nbonds, sizeof(std::size_t), 1u, f) == 1); success = - success && (fwrite(&ia_params_size, sizeof(std::size_t), 1, f) == 1); - success = - success && (fwrite(npartners.data(), sizeof(int), bonded_ia_params.size(), - f) == bonded_ia_params.size()); + success && (fwrite(npartners.data(), sizeof(int), nbonds, f) == nbonds); fclose(f); - if (!success) { - fprintf(stderr, "MPI-IO Error: Failed to write %s.\n", fn.c_str()); - errexit(); - } + static_cast(success or fatal_error("Could not write file", fn)); } -void mpi_mpiio_common_write(const char *filename, unsigned fields, +void mpi_mpiio_common_write(const std::string &prefix, unsigned fields, const ParticleRange &particles) { - std::string fnam(filename); - int const nlocalpart = static_cast(particles.size()); - // Keep static buffers in order not having to allocate them on every + auto const nlocalpart = static_cast(particles.size()); + auto const offset = mpi_calculate_file_offset(nlocalpart); + // Keep static buffers in order to avoid allocating them on every // function call static std::vector pos, vel; static std::vector id, type; - // Nlocalpart prefixes - // Prefixes based for arrays: 3 * pref for vel, pos. - int pref = 0, bpref = 0; - MPI_Exscan(&nlocalpart, &pref, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); - // Realloc static buffers if necessary if (nlocalpart > id.size()) id.resize(nlocalpart); - if (fields & MPIIO_OUT_POS && 3 * nlocalpart > pos.size()) - pos.resize(3 * nlocalpart); - if (fields & MPIIO_OUT_VEL && 3 * nlocalpart > vel.size()) - vel.resize(3 * nlocalpart); + if (fields & MPIIO_OUT_POS && 3ul * nlocalpart > pos.size()) + pos.resize(3ul * nlocalpart); + if (fields & MPIIO_OUT_VEL && 3ul * nlocalpart > vel.size()) + vel.resize(3ul * nlocalpart); if (fields & MPIIO_OUT_TYP && nlocalpart > type.size()) type.resize(nlocalpart); // Pack the necessary information - // Esp. rescale the velocities. 
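Annotation on the offsets passed to mpiio_dump_array below: a scalar array (ids, types) for a given rank starts at the particle offset itself, while a vector array (positions, velocities) starts at three times that offset because every particle contributes three values. A trivial sketch with a worked example (values are illustrative):

  #include <cstddef>

  // Element offset of a rank's slice in a dumped array: scalar fields use the
  // particle offset directly, vector fields multiply it by the number of
  // components per particle (3 for positions and velocities).
  inline std::size_t slice_offset(std::size_t particle_offset,
                                  std::size_t components_per_particle) {
    return particle_offset * components_per_particle;
  }
  // e.g. with particle offsets {0, 4, 6}: .id slices start at {0, 4, 6},
  // .pos slices start at {0, 12, 18}.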
- int i1 = 0, i3 = 0; + auto id_it = id.begin(); + auto type_it = type.begin(); + auto pos_it = pos.begin(); + auto vel_it = vel.begin(); for (auto const &p : particles) { - id[i1] = p.p.identity; + *id_it = p.id(); + ++id_it; if (fields & MPIIO_OUT_POS) { - pos[i3] = p.r.p[0]; - pos[i3 + 1] = p.r.p[1]; - pos[i3 + 2] = p.r.p[2]; + std::copy_n(std::begin(p.pos()), 3u, pos_it); + pos_it += 3u; } if (fields & MPIIO_OUT_VEL) { - vel[i3] = p.m.v[0]; - vel[i3 + 1] = p.m.v[1]; - vel[i3 + 2] = p.m.v[2]; + std::copy_n(std::begin(p.v()), 3u, vel_it); + vel_it += 3u; } if (fields & MPIIO_OUT_TYP) { - type[i1] = p.p.type; + *type_it = p.type(); + ++type_it; } - i1++; - i3 += 3; } int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) - dump_info(fnam + ".head", fields); - mpiio_dump_array(fnam + ".pref", &pref, 1, rank, MPI_INT); - mpiio_dump_array(fnam + ".id", id.data(), nlocalpart, pref, MPI_INT); + dump_info(prefix + ".head", fields); + auto const pref_offset = static_cast(rank); + mpiio_dump_array(prefix + ".pref", &offset, 1ul, pref_offset, + MPI_UNSIGNED_LONG); + mpiio_dump_array(prefix + ".id", id.data(), nlocalpart, offset, MPI_INT); if (fields & MPIIO_OUT_POS) - mpiio_dump_array(fnam + ".pos", pos.data(), 3 * nlocalpart, - 3 * pref, MPI_DOUBLE); + mpiio_dump_array(prefix + ".pos", pos.data(), 3ul * nlocalpart, + 3ul * offset, MPI_DOUBLE); if (fields & MPIIO_OUT_VEL) - mpiio_dump_array(fnam + ".vel", vel.data(), 3 * nlocalpart, - 3 * pref, MPI_DOUBLE); + mpiio_dump_array(prefix + ".vel", vel.data(), 3ul * nlocalpart, + 3ul * offset, MPI_DOUBLE); if (fields & MPIIO_OUT_TYP) - mpiio_dump_array(fnam + ".type", type.data(), nlocalpart, pref, + mpiio_dump_array(prefix + ".type", type.data(), nlocalpart, offset, MPI_INT); if (fields & MPIIO_OUT_BND) { @@ -230,151 +286,149 @@ void mpi_mpiio_common_write(const char *filename, unsigned fields, } // Determine the prefixes in the bond file - int bonds_size = static_cast(bonds.size()); - MPI_Exscan(&bonds_size, &bpref, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + auto const bonds_size = static_cast(bonds.size()); + auto const bonds_offset = mpi_calculate_file_offset(bonds_size); - mpiio_dump_array(fnam + ".boff", &bonds_size, 1, rank, MPI_INT); - mpiio_dump_array(fnam + ".bond", bonds.data(), bonds.size(), bpref, - MPI_CHAR); + mpiio_dump_array(prefix + ".boff", &bonds_size, 1ul, + pref_offset, MPI_UNSIGNED_LONG); + mpiio_dump_array(prefix + ".bond", bonds.data(), bonds.size(), + bonds_offset, MPI_CHAR); } } -/** Get the number of elements in a file by its file size and - * elem_sz. I.e. query the file size using stat(2) and divide it by - * elem_sz. +/** + * @brief Get the number of elements in a file by its file size and @p elem_sz. + * I.e. query the file size using stat(2) and divide it by @p elem_sz. * - * \param fn The filename - * \param elem_sz Sizeof a single element - * \return The number of elements stored binary in the file + * @param fn The filename + * @param elem_sz Size of a single element + * @return The number of elements stored in the file */ -static int get_num_elem(const std::string &fn, std::size_t elem_sz) { +static unsigned long get_num_elem(const std::string &fn, std::size_t elem_sz) { // Could also be done via MPI_File_open, MPI_File_get_size, // MPI_File_close. 
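Annotation on get_num_elem below: the number of values in a dump file is inferred purely from its size on disk, so e.g. the global particle number is the size of the .id file divided by sizeof(int). A sketch of the same idea using std::filesystem instead of stat(2) (an alternative shown only for illustration, not what the patch uses):

  #include <cstddef>
  #include <filesystem>
  #include <string>

  // Number of fixed-size elements stored in a binary dump file, derived from
  // the file size (throws on error); e.g. element_count("dump.id", sizeof(int))
  // gives the total number of particle ids written by all ranks.
  inline std::size_t element_count(std::string const &fn, std::size_t elem_sz) {
    return static_cast<std::size_t>(std::filesystem::file_size(fn)) / elem_sz;
  }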
struct stat st; errno = 0; if (stat(fn.c_str(), &st) != 0) { - fprintf(stderr, "MPI-IO Input Error: Could not get file size of %s: %s\n", - fn.c_str(), strerror(errno)); - errexit(); + auto const reason = strerror(errno); + fatal_error("Could not get file size of", fn, reason); } - return static_cast(st.st_size / elem_sz); + return static_cast(st.st_size) / elem_sz; } -/** Reads a previously dumped array of size len starting from prefix - * pref of type T using MPI_T as MPI datatype. Beware, that T and MPI_T - * have to match! +/** + * @brief Read a previously dumped array of size @p len starting from prefix + * @p pref of type @p T using @p MPI_T as MPI datatype. Beware, that + * @p T and @p MPI_T have to match! + * + * @param fn The file name to read from + * @param arr The array to populate + * @param len The number of elements to read + * @param pref The prefix for this process + * @param MPI_T The MPI datatype corresponding to the template parameter @p T */ template static void mpiio_read_array(const std::string &fn, T *arr, std::size_t len, std::size_t pref, MPI_Datatype MPI_T) { MPI_File f; int ret; - ret = MPI_File_open(MPI_COMM_WORLD, const_cast(fn.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &f); - if (ret) { - char buf[MPI_MAX_ERROR_STRING]; - int buf_len; - MPI_Error_string(ret, buf, &buf_len); - buf[buf_len] = '\0'; - fprintf(stderr, "MPI-IO Error: Could not open file \"%s\": %s\n", - fn.c_str(), buf); - errexit(); + fatal_error("Could not open file", fn, &f, ret); } - ret = MPI_File_set_view(f, pref * sizeof(T), MPI_T, MPI_T, - const_cast("native"), MPI_INFO_NULL); - - ret |= MPI_File_read_all(f, arr, len, MPI_T, MPI_STATUS_IGNORE); + auto const offset = + static_cast(pref) * static_cast(sizeof(T)); + ret = MPI_File_set_view(f, offset, MPI_T, MPI_T, const_cast("native"), + MPI_INFO_NULL); + + ret |= MPI_File_read_all(f, arr, static_cast(len), MPI_T, + MPI_STATUS_IGNORE); + static_cast(ret and fatal_error("Could not read file", fn, &f, ret)); MPI_File_close(&f); - if (ret) { - fprintf(stderr, "MPI-IO Error: Could not read file \"%s\".\n", fn.c_str()); - errexit(); - } } -/** Read the header file and store the information in the pointer - * "field". To be called by all processes. +/** + * @brief Read the header file and return the first value. + * To be called by all processes. 
* - * \param fn Filename of the head file - * \param rank The rank of the current process in MPI_COMM_WORLD - * \param fields Pointer to store the fields to + * @param fn Filename of the head file + * @param rank The rank of the current process in @c MPI_COMM_WORLD */ -static void read_head(const std::string &fn, int rank, unsigned *fields) { +static unsigned read_head(const std::string &fn, int rank) { + unsigned n_fields = 0u; FILE *f = nullptr; if (rank == 0) { - if (!(f = fopen(fn.c_str(), "rb"))) { - fprintf(stderr, "MPI-IO: Could not open %s.head.\n", fn.c_str()); - errexit(); - } - if (fread((void *)fields, sizeof(unsigned), 1, f) != 1) { - fprintf(stderr, "MPI-IO: Read on %s.head failed.\n", fn.c_str()); - errexit(); - } - MPI_Bcast(fields, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); + f = fopen(fn.c_str(), "rb"); + static_cast(not f and fatal_error("Could not open file", fn)); + auto const n = fread(static_cast(&n_fields), sizeof n_fields, 1, f); + static_cast((n == 1) or fatal_error("Could not read file", fn)); + } + MPI_Bcast(&n_fields, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); + if (f) { fclose(f); - } else { - MPI_Bcast(fields, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); } + return n_fields; } -/** Reads the pref file and fills pref and nlocalpart with their - * corresponding values. Needs to be called by all processes. +/** + * @brief Read the pref file. + * Needs to be called by all processes. * - * \param fn The file name of the prefs file - * \param rank The rank of the current process in MPI_COMM_WORLD - * \param size The size of MPI_COMM_WORLD - * \param nglobalpart The global amount of particles - * \param pref Pointer to store the prefix to - * \param nlocalpart Pointer to store the amount of local particles to + * @param fn The file name of the prefs file + * @param rank The rank of the current process in @c MPI_COMM_WORLD + * @param size The size of @c MPI_COMM_WORLD + * @param nglobalpart The global amount of particles + * @return The prefix and the local number of particles. 
*/ -static void read_prefs(const std::string &fn, int rank, int size, - int nglobalpart, int *pref, int *nlocalpart) { - mpiio_read_array(fn, pref, 1, rank, MPI_INT); +static std::tuple +read_prefs(const std::string &fn, int rank, int size, + unsigned long nglobalpart) { + auto const pref_offset = static_cast(rank); + unsigned long pref = 0ul; + unsigned long nlocalpart = 0ul; + mpiio_read_array(fn, &pref, 1ul, pref_offset, + MPI_UNSIGNED_LONG); if (rank > 0) - MPI_Send(pref, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD); + MPI_Send(&pref, 1, MPI_UNSIGNED_LONG, rank - 1, 0, MPI_COMM_WORLD); if (rank < size - 1) - MPI_Recv(nlocalpart, 1, MPI_INT, rank + 1, MPI_ANY_TAG, MPI_COMM_WORLD, - MPI_STATUS_IGNORE); + MPI_Recv(&nlocalpart, 1, MPI_UNSIGNED_LONG, rank + 1, MPI_ANY_TAG, + MPI_COMM_WORLD, MPI_STATUS_IGNORE); else - *nlocalpart = nglobalpart; - *nlocalpart -= *pref; + nlocalpart = nglobalpart; + nlocalpart -= pref; + return {pref, nlocalpart}; } -void mpi_mpiio_common_read(const char *filename, unsigned fields) { - std::string fnam(filename); - +void mpi_mpiio_common_read(const std::string &prefix, unsigned fields) { cell_structure.remove_all_particles(); int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); - auto const nproc = get_num_elem(fnam + ".pref", sizeof(int)); - auto const nglobalpart = get_num_elem(fnam + ".id", sizeof(int)); + auto const nproc = get_num_elem(prefix + ".pref", sizeof(unsigned long)); + auto const nglobalpart = get_num_elem(prefix + ".id", sizeof(int)); - if (rank == 0 && nproc != size) { - fprintf(stderr, "MPI-IO Error: Trying to read a file with a different COMM " - "size than at point of writing.\n"); - errexit(); + if (rank == 0 && nproc != static_cast(size)) { + fatal_error("Trying to read a file with a different COMM " + "size than at point of writing."); } // 1.head on head node: // Read head to determine fields at time of writing. // Compare this var to the current fields. - unsigned avail_fields; - read_head(fnam + ".head", rank, &avail_fields); + auto const avail_fields = read_head(prefix + ".head", rank); if (rank == 0 && (fields & avail_fields) != fields) { - fprintf(stderr, - "MPI-IO Error: Requesting to read fields which were not dumped.\n"); - errexit(); + fatal_error("Requesting to read fields which were not dumped."); } // 1.pref on all nodes: // Read own prefix (1 int at prefix rank). // Communicate own prefix to rank-1 // Determine nlocalpart (prefix of rank+1 - own prefix) on every node. - int pref, nlocalpart; - read_prefs(fnam + ".pref", rank, size, nglobalpart, &pref, &nlocalpart); + unsigned long pref, nlocalpart; + std::tie(pref, nlocalpart) = + read_prefs(prefix + ".pref", rank, size, nglobalpart); std::vector particles(nlocalpart); @@ -382,23 +436,26 @@ void mpi_mpiio_common_read(const char *filename, unsigned fields) { // 1.id on all nodes: // Read nlocalpart ints at defined prefix. 
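[Editor's sketch] read_prefs above derives each rank's local particle count from the next rank's prefix (and the last rank from the global count). The invariant it relies on can be checked serially; in this sketch, prefs and nglobalpart are hypothetical stand-ins for the values stored in the .pref file:

    #include <cstddef>
    #include <vector>

    // Given the exclusive prefix sums prefs[0..n-1] written by the ranks and
    // the global particle number, rank i owns prefs[i+1] - prefs[i] particles;
    // the last rank owns nglobalpart - prefs[n-1].
    std::vector<unsigned long>
    local_counts(std::vector<unsigned long> const &prefs,
                 unsigned long nglobalpart) {
      std::vector<unsigned long> counts(prefs.size());
      for (std::size_t i = 0; i < prefs.size(); ++i) {
        auto const next = (i + 1 < prefs.size()) ? prefs[i + 1] : nglobalpart;
        counts[i] = next - prefs[i];
      }
      return counts;
    }
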
std::vector id(nlocalpart); - mpiio_read_array(fnam + ".id", id.data(), nlocalpart, pref, MPI_INT); + auto id_it = id.begin(); + mpiio_read_array(prefix + ".id", id.data(), nlocalpart, pref, MPI_INT); - for (int i = 0; i < nlocalpart; ++i) { - particles[i].p.identity = id[i]; + for (auto &p : particles) { + p.id() = *id_it; + ++id_it; } } if (fields & MPIIO_OUT_POS) { // 1.pos on all nodes: // Read nlocalpart * 3 doubles at defined prefix * 3 - std::vector pos(3 * nlocalpart); - mpiio_read_array(fnam + ".pos", pos.data(), 3 * nlocalpart, - 3 * pref, MPI_DOUBLE); + std::vector pos(3ul * nlocalpart); + auto pos_it = pos.begin(); + mpiio_read_array(prefix + ".pos", pos.data(), 3ul * nlocalpart, + 3ul * pref, MPI_DOUBLE); - for (int i = 0; i < nlocalpart; ++i) { - particles[i].r.p = - Utils::Vector3d{pos[3 * i + 0], pos[3 * i + 1], pos[3 * i + 2]}; + for (auto &p : particles) { + std::copy_n(pos_it, 3u, std::begin(p.pos())); + pos_it += 3u; } } @@ -406,42 +463,47 @@ void mpi_mpiio_common_read(const char *filename, unsigned fields) { // 1.type on all nodes: // Read nlocalpart ints at defined prefix. std::vector type(nlocalpart); - mpiio_read_array(fnam + ".type", type.data(), nlocalpart, pref, + auto type_it = type.begin(); + mpiio_read_array(prefix + ".type", type.data(), nlocalpart, pref, MPI_INT); - for (int i = 0; i < nlocalpart; ++i) - particles[i].p.type = type[i]; + for (auto &p : particles) { + p.type() = *type_it; + ++type_it; + } } if (fields & MPIIO_OUT_VEL) { // 1.vel on all nodes: // Read nlocalpart * 3 doubles at defined prefix * 3 - std::vector vel(3 * nlocalpart); - mpiio_read_array(fnam + ".vel", vel.data(), 3 * nlocalpart, - 3 * pref, MPI_DOUBLE); + std::vector vel(3ul * nlocalpart); + auto vel_it = vel.begin(); + mpiio_read_array(prefix + ".vel", vel.data(), 3ul * nlocalpart, + 3ul * pref, MPI_DOUBLE); - for (int i = 0; i < nlocalpart; ++i) - particles[i].m.v = - Utils::Vector3d{vel[3 * i + 0], vel[3 * i + 1], vel[3 * i + 2]}; + for (auto &p : particles) { + std::copy_n(vel_it, 3u, std::begin(p.v())); + vel_it += 3u; + } } if (fields & MPIIO_OUT_BND) { // 1.boff - // 1 int per process - int bonds_size = 0; - mpiio_read_array(fnam + ".boff", &bonds_size, 1, rank, MPI_INT); - int bpref = 0; - MPI_Exscan(&bonds_size, &bpref, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + // 1 long int per process + auto const pref_offset = static_cast(rank); + unsigned long bonds_size = 0u; + mpiio_read_array(prefix + ".boff", &bonds_size, 1ul, + pref_offset, MPI_UNSIGNED_LONG); + auto const bonds_offset = mpi_calculate_file_offset(bonds_size); // 1.bond // nlocalbonds ints per process std::vector bond(bonds_size); - mpiio_read_array(fnam + ".bond", bond.data(), bonds_size, bpref, - MPI_CHAR); + mpiio_read_array(prefix + ".bond", bond.data(), bonds_size, + bonds_offset, MPI_CHAR); - namespace io = boost::iostreams; - io::array_source src(bond.data(), bond.size()); - io::stream ss(src); + boost::iostreams::array_source src(bond.data(), bond.size()); + boost::iostreams::stream ss(src); boost::archive::binary_iarchive ia(ss); for (auto &p : particles) { diff --git a/src/core/io/mpiio/mpiio.hpp b/src/core/io/mpiio/mpiio.hpp index be678886bd5..3ab09275416 100644 --- a/src/core/io/mpiio/mpiio.hpp +++ b/src/core/io/mpiio/mpiio.hpp @@ -18,44 +18,55 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -/** \file +#ifndef CORE_IO_MPIIO_MPIIO_HPP +#define CORE_IO_MPIIO_MPIIO_HPP + +/** @file * Implements binary output using MPI-IO. 
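[Editor's sketch] The bond payload read from the .bond file is deserialized by wrapping the raw char buffer in a boost::iostreams::array_source and feeding it to a binary_iarchive. A self-contained round trip of that pattern, with a plain std::vector<int> standing in for the actual bond data, looks like this:

    #include <boost/archive/binary_iarchive.hpp>
    #include <boost/archive/binary_oarchive.hpp>
    #include <boost/iostreams/device/array.hpp>
    #include <boost/iostreams/stream.hpp>
    #include <boost/serialization/vector.hpp>

    #include <sstream>
    #include <string>
    #include <vector>

    int main() {
      // Serialize a container to a byte string (write side).
      std::ostringstream out;
      {
        boost::archive::binary_oarchive oa(out);
        std::vector<int> bonds{1, 2, 3};
        oa << bonds;
      }
      std::string const buffer = out.str();

      // Deserialize from the raw bytes (read side), as done for the bond
      // buffer obtained via MPI-IO.
      boost::iostreams::array_source src(buffer.data(), buffer.size());
      boost::iostreams::stream<boost::iostreams::array_source> ss(src);
      boost::archive::binary_iarchive ia(ss);
      std::vector<int> restored;
      ia >> restored; // restored == {1, 2, 3}
    }
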
*/ -#ifndef _MPIIO_HPP -#define _MPIIO_HPP - #include "ParticleRange.hpp" + +#include + namespace Mpiio { -/** Constants which indicate what to output. To indicate the output of - * multiple fields, OR the corresponding values. - * +/** + * @brief Constants which indicate what to output. + * To indicate the output of multiple fields, OR the + * corresponding values. */ enum MPIIOOutputFields : unsigned int { + MPIIO_OUT_NON = 0u, MPIIO_OUT_POS = 1u, MPIIO_OUT_VEL = 2u, MPIIO_OUT_TYP = 4u, MPIIO_OUT_BND = 8u, }; -/** Parallel binary output using MPI-IO. To be called by all MPI - * processes. Aborts ESPResSo if an error occurs. +/** + * @brief Parallel binary output using MPI-IO. + * To be called by all MPI processes. Aborts ESPResSo if an error occurs. + * On 1 MPI rank, the error is converted to a runtime error and can be + * recovered by removing any file that may have already been written. * - * \param filename A null-terminated filename prefix. - * \param fields Output specifier which fields to dump. - * \param particles range of particles to serialize. + * @param prefix Filepath prefix. + * @param fields Specifier for which fields to dump. + * @param particles Range of particles to serialize. */ -void mpi_mpiio_common_write(const char *filename, unsigned fields, +void mpi_mpiio_common_write(const std::string &prefix, unsigned fields, const ParticleRange &particles); -/** Parallel binary input using MPI-IO. To be called by all MPI - * processes. Aborts ESPResSo if an error occurs. +/** + * @brief Parallel binary input using MPI-IO. + * To be called by all MPI processes. Aborts ESPResSo if an error occurs. + * On 1 MPI rank, the error is converted to a runtime error and can be + * recovered. * - * \param filename A null-terminated filename prefix. - * \param fields Specifier which fields to read. + * @param prefix Filepath prefix. + * @param fields Specifier for which fields to read. 
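[Editor's sketch] Since the output selectors are bit flags, callers combine them with bitwise OR, as the enum documentation above says. A usage sketch, where the file prefix is arbitrary and particles is a placeholder for a ParticleRange:

    // Dump positions, velocities and types, but not bonds.
    unsigned const fields =
        Mpiio::MPIIO_OUT_POS | Mpiio::MPIIO_OUT_VEL | Mpiio::MPIIO_OUT_TYP;
    Mpiio::mpi_mpiio_common_write("mpiio_dump", fields, particles);

    // When reading back, request at most the fields that were dumped, e.g.
    Mpiio::mpi_mpiio_common_read("mpiio_dump", Mpiio::MPIIO_OUT_POS);
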
*/ -void mpi_mpiio_common_read(const char *filename, unsigned fields); +void mpi_mpiio_common_read(const std::string &prefix, unsigned fields); } // namespace Mpiio diff --git a/src/core/io/writer/h5md_core.cpp b/src/core/io/writer/h5md_core.cpp index b25232484ff..d2c0328062d 100644 --- a/src/core/io/writer/h5md_core.cpp +++ b/src/core/io/writer/h5md_core.cpp @@ -188,8 +188,7 @@ void File::load_file(const std::string &file_path) { load_datasets(); } -void write_box(const BoxGeometry &geometry, const h5xx::file &h5md_file, - h5xx::dataset &dataset) { +static void write_box(const BoxGeometry &geometry, h5xx::dataset &dataset) { auto const extents = static_cast(dataset).extents(); extend_dataset(dataset, Vector2hs{1, 0}); h5xx::write_dataset(dataset, geometry.length(), @@ -199,7 +198,8 @@ void write_box(const BoxGeometry &geometry, const h5xx::file &h5md_file, void write_attributes(const std::string &espresso_version, h5xx::file &h5md_file) { auto h5md_group = h5xx::group(h5md_file, "h5md"); - h5xx::write_attribute(h5md_group, "version", boost::array{1, 1}); + h5xx::write_attribute(h5md_group, "version", + boost::array{{1, 1}}); auto h5md_creator_group = h5xx::group(h5md_group, "creator"); h5xx::write_attribute(h5md_creator_group, "name", "ESPResSo"); h5xx::write_attribute(h5md_creator_group, "version", espresso_version); @@ -268,7 +268,7 @@ template struct slice_info {}; template <> struct slice_info<3> { static auto extent(hsize_t n_part_diff) { return Vector3hs{1, n_part_diff, 0}; - }; + } static constexpr auto count() { return Vector3hs{1, 1, 3}; } static auto offset(hsize_t n_time_steps, hsize_t prefix) { return Vector3hs{n_time_steps, prefix, 0}; @@ -276,7 +276,7 @@ template <> struct slice_info<3> { }; template <> struct slice_info<2> { - static auto extent(hsize_t n_part_diff) { return Vector2hs{1, n_part_diff}; }; + static auto extent(hsize_t n_part_diff) { return Vector2hs{1, n_part_diff}; } static constexpr auto count() { return Vector2hs{1, 1}; } static auto offset(hsize_t n_time_steps, hsize_t prefix) { return Vector2hs{n_time_steps, prefix}; @@ -304,7 +304,7 @@ void write_td_particle_property(hsize_t prefix, hsize_t n_part_global, void File::write(const ParticleRange &particles, double time, int step, BoxGeometry const &geometry) { - write_box(geometry, m_h5md_file, datasets["particles/atoms/box/edges/value"]); + write_box(geometry, datasets["particles/atoms/box/edges/value"]); write_connectivity(particles); int const n_part_local = particles.size(); diff --git a/src/core/io/writer/h5md_core.hpp b/src/core/io/writer/h5md_core.hpp index cd58125f9a7..c912019fbb7 100644 --- a/src/core/io/writer/h5md_core.hpp +++ b/src/core/io/writer/h5md_core.hpp @@ -75,7 +75,7 @@ class File { m_velocity_unit(std::move(velocity_unit)), m_charge_unit(std::move(charge_unit)), m_comm(std::move(comm)) { init_file(file_path); - }; + } ~File() = default; /** @@ -99,49 +99,49 @@ class File { * @brief Retrieve the path to the hdf5 file. * @return The path as a string. */ - std::string file_path() const { return m_h5md_file.name(); }; + std::string file_path() const { return m_h5md_file.name(); } /** * @brief Retrieve the path to the simulation script. * @return The path as a string. */ - std::string &script_path() { return m_script_path; }; + std::string &script_path() { return m_script_path; } /** * @brief Retrieve the set mass unit. * @return The unit as a string. 
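[Editor's note] The extra braces added to the h5md version attribute deserve a remark: boost::array is an aggregate wrapping a C array, so the inner pair of braces initializes that nested array explicitly and avoids missing-braces warnings on some compilers. Minimal illustration:

    #include <boost/array.hpp>

    // Single braces rely on brace elision and may trigger -Wmissing-braces;
    // double braces initialize the nested C array member explicitly.
    boost::array<int, 2> version = {{1, 1}};
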
*/ - std::string &mass_unit() { return m_mass_unit; }; + std::string &mass_unit() { return m_mass_unit; } /** * @brief Retrieve the set length unit. * @return The unit as a string. */ - std::string &length_unit() { return m_length_unit; }; + std::string &length_unit() { return m_length_unit; } /** * @brief Retrieve the set time unit. * @return The unit as a string. */ - std::string &time_unit() { return m_time_unit; }; + std::string &time_unit() { return m_time_unit; } /** * @brief Retrieve the set force unit. * @return The unit as a string. */ - std::string &force_unit() { return m_force_unit; }; + std::string &force_unit() { return m_force_unit; } /** * @brief Retrieve the set velocity unit. * @return The unit as a string. */ - std::string &velocity_unit() { return m_velocity_unit; }; + std::string &velocity_unit() { return m_velocity_unit; } /** * @brief Retrieve the set charge unit. * @return The unit as a string. */ - std::string &charge_unit() { return m_charge_unit; }; + std::string &charge_unit() { return m_charge_unit; } /** * @brief Method to enforce flushing the buffer to disk. diff --git a/src/core/nonbonded_interactions/VerletCriterion.hpp b/src/core/nonbonded_interactions/VerletCriterion.hpp index cb770106d10..d6a62a40f03 100644 --- a/src/core/nonbonded_interactions/VerletCriterion.hpp +++ b/src/core/nonbonded_interactions/VerletCriterion.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _VERLETCRITERION_HPP -#define _VERLETCRITERION_HPP +#ifndef CORE_NB_IA_VERLETCRITERION_HPP +#define CORE_NB_IA_VERLETCRITERION_HPP #include "Particle.hpp" #include "config.hpp" @@ -28,10 +28,16 @@ #include #include +struct GetNonbondedCutoff { + auto operator()(int type_i, int type_j) const { + return get_ia_param(type_i, type_j)->max_cut; + } +}; + /** Returns true if the particles are to be considered for short range * interactions. */ -class VerletCriterion { +template class VerletCriterion { const double m_skin; const double m_eff_max_cut2; const double m_eff_coulomb_cut2 = 0.; @@ -42,6 +48,7 @@ class VerletCriterion { return INACTIVE_CUTOFF; return Utils::sqr(x + m_skin); } + CutoffGetter get_nonbonded_cutoff; public: VerletCriterion(double skin, double max_cut, double coulomb_cut = 0., @@ -78,9 +85,9 @@ class VerletCriterion { #endif // Within short-range distance (including dpd and the like) - auto const max_cut = get_ia_param(p1.p.type, p2.p.type)->max_cut; - return (max_cut != INACTIVE_CUTOFF) && - (dist2 <= Utils::sqr(max_cut + m_skin)); + auto const ia_cut = get_nonbonded_cutoff(p1.p.type, p2.p.type); + return (ia_cut != INACTIVE_CUTOFF) && + (dist2 <= Utils::sqr(ia_cut + m_skin)); } }; #endif diff --git a/src/core/nonbonded_interactions/gay_berne.hpp b/src/core/nonbonded_interactions/gay_berne.hpp index 33cc0b5fa97..7e17e15079c 100644 --- a/src/core/nonbonded_interactions/gay_berne.hpp +++ b/src/core/nonbonded_interactions/gay_berne.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _GB_HPP -#define _GB_HPP +#ifndef CORE_NB_IA_GB_HPP +#define CORE_NB_IA_GB_HPP /** \file * Routines to calculate the Gay-Berne potential between particle pairs. 
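[Editor's sketch] The VerletCriterion change above turns the cutoff lookup into a template parameter, with GetNonbondedCutoff as the production policy that queries get_ia_param. This makes the criterion testable without the global interaction table. A hedged sketch of an alternative policy (the name and the instantiation below are hypothetical, assuming the template parameter defaults to GetNonbondedCutoff):

    // Any callable taking two particle types and returning a cutoff can stand
    // in for GetNonbondedCutoff, e.g. a constant cutoff for unit tests.
    struct ConstantCutoff {
      double value;
      double operator()(int /* type_i */, int /* type_j */) const {
        return value;
      }
    };
    // A test could then instantiate VerletCriterion<ConstantCutoff> and
    // exercise the distance logic without touching nonbonded_ia_params.
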
@@ -175,5 +175,5 @@ inline double gb_pair_energy(Utils::Vector3d const &ui, return E(r_eff(dist)) - E(r_eff(ia_params.gay_berne.cut)); } -#endif +#endif // GAY_BERNE #endif diff --git a/src/core/nonbonded_interactions/lj.hpp b/src/core/nonbonded_interactions/lj.hpp index 57fd4205933..ee07a3b7fde 100644 --- a/src/core/nonbonded_interactions/lj.hpp +++ b/src/core/nonbonded_interactions/lj.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _LJ_H -#define _LJ_H +#ifndef CORE_NB_IA_LJ_HPP +#define CORE_NB_IA_LJ_HPP #include "config.hpp" diff --git a/src/core/nonbonded_interactions/ljcos.hpp b/src/core/nonbonded_interactions/ljcos.hpp index 5a0821e9db5..8c0762befae 100644 --- a/src/core/nonbonded_interactions/ljcos.hpp +++ b/src/core/nonbonded_interactions/ljcos.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _LJCOS_H -#define _LJCOS_H +#ifndef CORE_NB_IA_LJCOS_HPP +#define CORE_NB_IA_LJCOS_HPP /** \file * Routines to calculate the Lennard-Jones+cosine potential between * particle pairs. @@ -73,15 +73,13 @@ inline double ljcos_pair_energy(IA_parameters const &ia_params, double dist) { return 4.0 * ia_params.ljcos.eps * (Utils::sqr(frac6) - frac6); } /* cosine part of the potential. */ - if (dist < (ia_params.ljcos.cut + ia_params.ljcos.offset)) { - return .5 * ia_params.ljcos.eps * - (cos(ia_params.ljcos.alfa * Utils::sqr(r_off) + - ia_params.ljcos.beta) - - 1.); - } + return .5 * ia_params.ljcos.eps * + (cos(ia_params.ljcos.alfa * Utils::sqr(r_off) + + ia_params.ljcos.beta) - + 1.); } return 0.0; } -#endif +#endif // LJCOS #endif diff --git a/src/core/nonbonded_interactions/ljcos2.hpp b/src/core/nonbonded_interactions/ljcos2.hpp index b54dbdb61fd..d53bf488661 100644 --- a/src/core/nonbonded_interactions/ljcos2.hpp +++ b/src/core/nonbonded_interactions/ljcos2.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _LJCOS2_H -#define _LJCOS2_H +#ifndef CORE_NB_IA_LJCOS2_HPP +#define CORE_NB_IA_LJCOS2_HPP /** \file * Routines to calculate the Lennard-Jones with cosine tail potential diff --git a/src/core/nonbonded_interactions/ljgen.hpp b/src/core/nonbonded_interactions/ljgen.hpp index 034a390da46..2db9d517bbd 100644 --- a/src/core/nonbonded_interactions/ljgen.hpp +++ b/src/core/nonbonded_interactions/ljgen.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _LJGEN_H -#define _LJGEN_H +#ifndef CORE_NB_IA_LJGEN_HPP +#define CORE_NB_IA_LJGEN_HPP /** \file * Routines to calculate the generalized Lennard-Jones potential between @@ -111,7 +111,5 @@ inline double ljgen_pair_energy(IA_parameters const &ia_params, double dist) { return 0.0; } -#endif - -/* LJGEN_H */ -#endif +#endif // LENNARD_JONES_GENERIC +#endif // CORE_NB_IA_LJGEN_HPP diff --git a/src/core/nonbonded_interactions/morse.hpp b/src/core/nonbonded_interactions/morse.hpp index eae871f9afa..a571f54510e 100644 --- a/src/core/nonbonded_interactions/morse.hpp +++ b/src/core/nonbonded_interactions/morse.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#ifndef _MORSE_H -#define _MORSE_H +#ifndef CORE_NB_IA_MORSE_HPP +#define CORE_NB_IA_MORSE_HPP /** \file * Routines to calculate the Morse potential between particle pairs. @@ -65,4 +65,4 @@ inline double morse_pair_energy(IA_parameters const &ia_params, double dist) { } #endif /* ifdef MORSE */ -#endif /* ifdef _MORSE_H */ +#endif /* ifdef CORE_NB_IA_MORSE_HPP */ diff --git a/src/core/nonbonded_interactions/nonbonded_interaction_data.cpp b/src/core/nonbonded_interactions/nonbonded_interaction_data.cpp index 4068eccf5f4..bbeeeae88d8 100644 --- a/src/core/nonbonded_interactions/nonbonded_interaction_data.cpp +++ b/src/core/nonbonded_interactions/nonbonded_interaction_data.cpp @@ -49,7 +49,7 @@ * variables *****************************************/ int max_seen_particle_type = 0; -std::vector ia_params; +std::vector nonbonded_ia_params; double min_global_cut = INACTIVE_CUTOFF; @@ -71,23 +71,23 @@ static void mpi_realloc_ia_params_local(int new_size) { } max_seen_particle_type = new_size; - std::swap(ia_params, new_params); + std::swap(nonbonded_ia_params, new_params); } REGISTER_CALLBACK(mpi_realloc_ia_params_local) -/** Increase the size of the @ref ia_params vector. */ +/** Increase the size of the @ref nonbonded_ia_params vector. */ inline void mpi_realloc_ia_params(int new_size) { mpi_call_all(mpi_realloc_ia_params_local, new_size); } static void mpi_bcast_all_ia_params_local() { - boost::mpi::broadcast(comm_cart, ia_params, 0); + boost::mpi::broadcast(comm_cart, nonbonded_ia_params, 0); } REGISTER_CALLBACK(mpi_bcast_all_ia_params_local) -/** Broadcast @ref ia_params to all nodes. */ +/** Broadcast @ref nonbonded_ia_params to all nodes. */ inline void mpi_bcast_all_ia_params() { mpi_call_all(mpi_bcast_all_ia_params_local); } @@ -100,7 +100,7 @@ IA_parameters *get_ia_param_safe(int i, int j) { std::string ia_params_get_state() { std::stringstream out; boost::archive::binary_oarchive oa(out); - oa << ia_params; + oa << nonbonded_ia_params; oa << max_seen_particle_type; return out.str(); } @@ -110,8 +110,8 @@ void ia_params_set_state(std::string const &state) { iostreams::array_source src(state.data(), state.size()); iostreams::stream ss(src); boost::archive::binary_iarchive ia(ss); - ia_params.clear(); - ia >> ia_params; + nonbonded_ia_params.clear(); + ia >> nonbonded_ia_params; ia >> max_seen_particle_type; mpi_realloc_ia_params(max_seen_particle_type); mpi_bcast_all_ia_params(); @@ -202,7 +202,7 @@ static double recalc_maximal_cutoff(const IA_parameters &data) { double maximal_cutoff_nonbonded() { auto max_cut_nonbonded = INACTIVE_CUTOFF; - for (auto &data : ia_params) { + for (auto &data : nonbonded_ia_params) { data.max_cut = recalc_maximal_cutoff(data); max_cut_nonbonded = std::max(max_cut_nonbonded, data.max_cut); } @@ -211,7 +211,7 @@ double maximal_cutoff_nonbonded() { } void reset_ia_params() { - boost::fill(ia_params, IA_parameters{}); + boost::fill(nonbonded_ia_params, IA_parameters{}); mpi_bcast_all_ia_params(); } diff --git a/src/core/nonbonded_interactions/nonbonded_interaction_data.hpp b/src/core/nonbonded_interactions/nonbonded_interaction_data.hpp index daab467c72e..347fe99e5ce 100644 --- a/src/core/nonbonded_interactions/nonbonded_interaction_data.hpp +++ b/src/core/nonbonded_interactions/nonbonded_interaction_data.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#ifndef _INTERACTION_DATA_H -#define _INTERACTION_DATA_H +#ifndef CORE_NB_IA_INTERACTION_DATA_HPP +#define CORE_NB_IA_INTERACTION_DATA_HPP /** \file * Various procedures concerning interactions between particles. */ @@ -275,7 +275,7 @@ struct IA_parameters { #endif }; -extern std::vector ia_params; +extern std::vector nonbonded_ia_params; /** Maximal particle type seen so far. */ extern int max_seen_particle_type; @@ -306,8 +306,8 @@ inline IA_parameters *get_ia_param(int i, int j) { assert(i >= 0 && i < max_seen_particle_type); assert(j >= 0 && j < max_seen_particle_type); - return &ia_params[Utils::upper_triangular(std::min(i, j), std::max(i, j), - max_seen_particle_type)]; + return &nonbonded_ia_params[Utils::upper_triangular( + std::min(i, j), std::max(i, j), max_seen_particle_type)]; } /** Get interaction parameters between particle types i and j. diff --git a/src/core/nonbonded_interactions/thole.hpp b/src/core/nonbonded_interactions/thole.hpp index 25c60a93cdf..2e6ff05c2f4 100644 --- a/src/core/nonbonded_interactions/thole.hpp +++ b/src/core/nonbonded_interactions/thole.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _THOLE_H -#define _THOLE_H +#ifndef CORE_NB_IA_THOLE_HPP +#define CORE_NB_IA_THOLE_HPP /** \file * Routines to calculate the Thole damping potential between particle pairs. * See @cite thole81a. @@ -80,10 +80,9 @@ inline double thole_pair_energy(Particle const &p1, Particle const &p2, auto const sd = thole_s * dist; auto const S_r = 1.0 - (1.0 + sd / 2.0) * exp(-sd); // Subtract p3m shortrange energy and add thole energy - return Coulomb::pair_energy(p1, p2, thole_q1q2 * (-1.0 + S_r), d, dist, - dist * dist); + return Coulomb::pair_energy(p1, p2, thole_q1q2 * (-1.0 + S_r), d, dist); } return 0.0; } -#endif +#endif // THOLE #endif diff --git a/src/core/object-in-fluid/oif_global_forces.cpp b/src/core/object-in-fluid/oif_global_forces.cpp index bd7cd4c5d88..7658d5d9356 100644 --- a/src/core/object-in-fluid/oif_global_forces.cpp +++ b/src/core/object-in-fluid/oif_global_forces.cpp @@ -48,13 +48,14 @@ Utils::Vector2d calc_oif_global(int molType, CellStructure &cs) { cs.bond_loop([&partArea, &VOL_partVol, molType](Particle &p1, int bond_id, Utils::Span partners) { - if (p1.p.mol_id != molType) + if (p1.mol_id() != molType) return false; if (boost::get(bonded_ia_params.at(bond_id).get()) != nullptr) { // remaining neighbors fetched - auto const p11 = unfolded_position(p1.r.p, p1.l.i, box_geo.length()); + auto const p11 = + unfolded_position(p1.pos(), p1.image_box(), box_geo.length()); auto const p22 = p11 + box_geo.get_mi_vector(partners[0]->r.p, p11); auto const p33 = p11 + box_geo.get_mi_vector(partners[1]->r.p, p11); @@ -83,12 +84,13 @@ void add_oif_global_forces(Utils::Vector2d const &area_volume, int molType, cs.bond_loop([area, VOL_volume, molType](Particle &p1, int bond_id, Utils::Span partners) { - if (p1.p.mol_id != molType) + if (p1.mol_id() != molType) return false; if (auto const *iaparams = boost::get( bonded_ia_params.at(bond_id).get())) { - auto const p11 = unfolded_position(p1.r.p, p1.l.i, box_geo.length()); + auto const p11 = + unfolded_position(p1.pos(), p1.image_box(), box_geo.length()); auto const p22 = p11 + box_geo.get_mi_vector(partners[0]->r.p, p11); auto const p33 = p11 + box_geo.get_mi_vector(partners[1]->r.p, p11); @@ -117,7 +119,7 @@ void add_oif_global_forces(Utils::Vector2d const &area_volume, int molType, (m1_length * m1_length + m2_length * 
m2_length + m3_length * m3_length); - p1.f.f += fac * m1 + VOL_force; + p1.force() += fac * m1 + VOL_force; partners[0]->f.f += fac * m2 + VOL_force; partners[1]->f.f += fac * m3 + VOL_force; } diff --git a/src/core/object-in-fluid/oif_global_forces.hpp b/src/core/object-in-fluid/oif_global_forces.hpp index aa8a9dea2f5..5a44b31aa9f 100644 --- a/src/core/object-in-fluid/oif_global_forces.hpp +++ b/src/core/object-in-fluid/oif_global_forces.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _OBJECT_IN_FLUID_OIF_GLOBAL_FORCES_H -#define _OBJECT_IN_FLUID_OIF_GLOBAL_FORCES_H +#ifndef CORE_OBJECT_IN_FLUID_OIF_GLOBAL_FORCES_HPP +#define CORE_OBJECT_IN_FLUID_OIF_GLOBAL_FORCES_HPP /** \file * Routines to calculate the OIF global forces energy or/and and force * for a particle triple (triangle from mesh). See @cite dupin07a. @@ -33,7 +33,7 @@ * - calculates the global area and global volume for a cell before the forces * are handled * - MPI synchronization with all reduce - * - !!! loop over particles from domain_decomposition !!! + * - !!! loop over particles from regular_decomposition !!! */ Utils::Vector2d calc_oif_global(int molType, CellStructure &cs); diff --git a/src/core/object-in-fluid/oif_local_forces.hpp b/src/core/object-in-fluid/oif_local_forces.hpp index e8d482e7852..1af2a03556b 100644 --- a/src/core/object-in-fluid/oif_local_forces.hpp +++ b/src/core/object-in-fluid/oif_local_forces.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _OBJECT_IN_FLUID_OIF_LOCAL_FORCES_H -#define _OBJECT_IN_FLUID_OIF_LOCAL_FORCES_H +#ifndef CORE_OBJECT_IN_FLUID_OIF_LOCAL_FORCES_HPP +#define CORE_OBJECT_IN_FLUID_OIF_LOCAL_FORCES_HPP /** \file * Routines to calculate the OIF local forces for a particle quadruple @@ -118,10 +118,11 @@ OifLocalForcesBond::calc_forces(Particle const &p2, Particle const &p1, Particle const &p3, Particle const &p4) const { // first-fold-then-the-same approach - auto const fp2 = unfolded_position(p2.r.p, p2.l.i, box_geo.length()); - auto const fp1 = fp2 + box_geo.get_mi_vector(p1.r.p, fp2); - auto const fp3 = fp2 + box_geo.get_mi_vector(p3.r.p, fp2); - auto const fp4 = fp2 + box_geo.get_mi_vector(p4.r.p, fp2); + auto const fp2 = + unfolded_position(p2.pos(), p2.image_box(), box_geo.length()); + auto const fp1 = fp2 + box_geo.get_mi_vector(p1.pos(), fp2); + auto const fp3 = fp2 + box_geo.get_mi_vector(p3.pos(), fp2); + auto const fp4 = fp2 + box_geo.get_mi_vector(p4.pos(), fp2); Utils::Vector3d force1{}, force2{}, force3{}, force4{}; @@ -152,7 +153,7 @@ OifLocalForcesBond::calc_forces(Particle const &p2, Particle const &p1, if (kvisc > TINY_OIF_ELASTICITY_COEFFICIENT) { // to be implemented.... 
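[Editor's sketch] Both OIF kernels now unfold positions from the particle's image box before taking minimum-image differences. For readers unfamiliar with the convention, this is what unfolding computes, assuming image_box holds integer periodic image counts (consistent with its use here):

    #include <array>

    // Real-space (unfolded) coordinate = folded coordinate plus the periodic
    // image count times the box length, component-wise.
    std::array<double, 3> unfold(std::array<double, 3> const &folded_pos,
                                 std::array<int, 3> const &image_box,
                                 std::array<double, 3> const &box_length) {
      std::array<double, 3> unfolded{};
      for (int k = 0; k < 3; ++k)
        unfolded[k] = folded_pos[k] + image_box[k] * box_length[k];
      return unfolded;
    }
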
auto const dx = fp2 - fp3; auto const len2 = dx.norm2(); - auto const v_ij = p3.m.v - p2.m.v; + auto const v_ij = p3.v() - p2.v(); // Variant A // Here the force is in the direction of relative velocity btw points diff --git a/src/core/observables/BondAngles.hpp b/src/core/observables/BondAngles.hpp index 833afb4b7fe..f77673a1e5e 100644 --- a/src/core/observables/BondAngles.hpp +++ b/src/core/observables/BondAngles.hpp @@ -25,6 +25,8 @@ #include +#include + #include #include #include @@ -47,7 +49,7 @@ class BondAngles : public PidObservable { } std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { std::vector res(n_values()); auto v1 = box_geo.get_mi_vector(traits.position(particles[1]), @@ -56,13 +58,9 @@ class BondAngles : public PidObservable { for (std::size_t i = 0, end = n_values(); i < end; i++) { auto v2 = box_geo.get_mi_vector(traits.position(particles[i + 2]), traits.position(particles[i + 1])); - auto n2 = v2.norm(); - auto cosine = (v1 * v2) / (n1 * n2); - // sanitize cosine value - if (cosine > TINY_COS_VALUE) - cosine = TINY_COS_VALUE; - else if (cosine < -TINY_COS_VALUE) - cosine = -TINY_COS_VALUE; + auto const n2 = v2.norm(); + auto const cosine = boost::algorithm::clamp( + (v1 * v2) / (n1 * n2), -TINY_COS_VALUE, TINY_COS_VALUE); /* to reduce computational time, after calculating an angle ijk, the * vector r_ij takes the value r_jk, but to orient it correctly, it has * to be multiplied -1; it's cheaper to do this operation on a double diff --git a/src/core/observables/BondDihedrals.hpp b/src/core/observables/BondDihedrals.hpp index 721fc220ea5..68ff105d098 100644 --- a/src/core/observables/BondDihedrals.hpp +++ b/src/core/observables/BondDihedrals.hpp @@ -53,7 +53,7 @@ class BondDihedrals : public PidObservable { } std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { std::vector res(n_values()); auto v1 = box_geo.get_mi_vector(traits.position(particles[1]), diff --git a/src/core/observables/CosPersistenceAngles.hpp b/src/core/observables/CosPersistenceAngles.hpp index 4d0c5c46e11..32445f08c28 100644 --- a/src/core/observables/CosPersistenceAngles.hpp +++ b/src/core/observables/CosPersistenceAngles.hpp @@ -49,11 +49,11 @@ class CosPersistenceAngles : public PidObservable { } std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { auto const no_of_angles = n_values(); + auto const no_of_bonds = no_of_angles + 1; std::vector angles(no_of_angles); - auto const no_of_bonds = n_values() + 1; std::vector bond_vectors(no_of_bonds); auto get_bond_vector = [&](auto index) { return box_geo.get_mi_vector(traits.position(particles[index + 1]), diff --git a/src/core/observables/CylindricalDensityProfile.hpp b/src/core/observables/CylindricalDensityProfile.hpp index c347c8f797b..f4e67ee3a55 100644 --- a/src/core/observables/CylindricalDensityProfile.hpp +++ b/src/core/observables/CylindricalDensityProfile.hpp @@ -35,11 +35,11 @@ class CylindricalDensityProfile : public CylindricalPidProfileObservable { public: using CylindricalPidProfileObservable::CylindricalPidProfileObservable; std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { Utils::CylindricalHistogram histogram(n_bins(), limits()); - for (auto p 
: particles) { + for (auto const &p : particles) { histogram.update(Utils::transform_coordinate_cartesian_to_cylinder( folded_position(traits.position(p), box_geo) - transform_params->center(), diff --git a/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.cpp b/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.cpp index 4bb9a0ae7b4..2ba77f5e5c2 100644 --- a/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.cpp +++ b/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.cpp @@ -35,7 +35,7 @@ std::vector CylindricalLBVelocityProfileAtParticlePositions::evaluate( const ParticleObservables::traits &traits) const { Utils::CylindricalHistogram histogram(n_bins(), limits()); - for (auto p : particles) { + for (auto const &p : particles) { auto const pos = folded_position(traits.position(p), box_geo); auto const v = lb_lbfluid_get_interpolated_velocity(pos) * lb_lbfluid_get_lattice_speed(); diff --git a/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.hpp b/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.hpp index a22617effdf..64a14e63de0 100644 --- a/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.hpp +++ b/src/core/observables/CylindricalLBVelocityProfileAtParticlePositions.hpp @@ -37,8 +37,8 @@ class CylindricalLBVelocityProfileAtParticlePositions using CylindricalPidProfileObservable::CylindricalPidProfileObservable; std::vector - evaluate(Utils::Span> particles, - const ParticleObservables::traits &traits) const override; + evaluate(ParticleReferenceRange particles, + const ParticleObservables::traits &) const override; std::vector shape() const override { auto const b = n_bins(); diff --git a/src/core/observables/CylindricalVelocityProfile.hpp b/src/core/observables/CylindricalVelocityProfile.hpp index 1dfacfefd9c..e463b85d635 100644 --- a/src/core/observables/CylindricalVelocityProfile.hpp +++ b/src/core/observables/CylindricalVelocityProfile.hpp @@ -38,7 +38,7 @@ class CylindricalVelocityProfile : public CylindricalPidProfileObservable { using CylindricalPidProfileObservable::CylindricalPidProfileObservable; std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { Utils::CylindricalHistogram histogram(n_bins(), limits()); diff --git a/src/core/observables/ForceDensityProfile.hpp b/src/core/observables/ForceDensityProfile.hpp index a94b974a626..f3a86945622 100644 --- a/src/core/observables/ForceDensityProfile.hpp +++ b/src/core/observables/ForceDensityProfile.hpp @@ -42,10 +42,11 @@ class ForceDensityProfile : public PidProfileObservable { std::vector evaluate(ParticleReferenceRange particles, - const ParticleObservables::traits &traits) const override { + const ParticleObservables::traits &) const override { Utils::Histogram histogram(n_bins(), limits()); - for (auto p : particles) { - histogram.update(folded_position(p.get().r.p, box_geo), p.get().f.f); + for (auto const &p : particles) { + histogram.update(folded_position(p.get().pos(), box_geo), + p.get().force()); } histogram.normalize(); return histogram.get_histogram(); diff --git a/src/core/observables/LBProfileObservable.hpp b/src/core/observables/LBProfileObservable.hpp index 14b9007f022..10375349078 100644 --- a/src/core/observables/LBProfileObservable.hpp +++ b/src/core/observables/LBProfileObservable.hpp @@ -43,9 +43,9 @@ class LBProfileObservable : public ProfileObservable { double max_z, bool 
allow_empty_bins = false) : ProfileObservable(n_x_bins, n_y_bins, n_z_bins, min_x, max_x, min_y, max_y, min_z, max_z), - sampling_delta{sampling_delta_x, sampling_delta_y, sampling_delta_z}, - sampling_offset{sampling_offset_x, sampling_offset_y, - sampling_offset_z}, + sampling_delta{{sampling_delta_x, sampling_delta_y, sampling_delta_z}}, + sampling_offset{ + {sampling_offset_x, sampling_offset_y, sampling_offset_z}}, allow_empty_bins(allow_empty_bins) { if (sampling_delta[0] <= 0.) throw std::domain_error("sampling_delta_x has to be > 0"); @@ -68,10 +68,8 @@ class LBProfileObservable : public ProfileObservable { void calculate_sampling_positions() { auto const lim = limits(); sampling_positions.clear(); - assert(sampling_delta[0] > 0. and sampling_delta[1] > 0. and - sampling_delta[2] > 0.); - assert(sampling_offset[0] >= 0. and sampling_offset[1] >= 0. and - sampling_offset[2] >= 0.); + assert(Utils::Vector3d(sampling_delta) > Utils::Vector3d::broadcast(0.)); + assert(Utils::Vector3d(sampling_offset) >= Utils::Vector3d::broadcast(0.)); const auto n_samples_x = static_cast( std::rint((lim[0].second - lim[0].first) / sampling_delta[0])); const auto n_samples_y = static_cast( diff --git a/src/core/observables/ParticleAngularVelocities.hpp b/src/core/observables/ParticleAngularVelocities.hpp index aa1d476b9c4..ab59e1270b7 100644 --- a/src/core/observables/ParticleAngularVelocities.hpp +++ b/src/core/observables/ParticleAngularVelocities.hpp @@ -34,15 +34,13 @@ class ParticleAngularVelocities : public PidObservable { using PidObservable::PidObservable; std::vector - evaluate(Utils::Span> particles, - const ParticleObservables::traits &traits) const override { + evaluate(ParticleReferenceRange particles, + const ParticleObservables::traits &) const override { std::vector res(n_values()); - #ifdef ROTATION std::size_t i = 0; - for (auto p : particles) { - const Utils::Vector3d omega = - convert_vector_body_to_space(p.get(), p.get().m.omega); + for (auto const &p : particles) { + auto const omega = convert_vector_body_to_space(p.get(), p.get().m.omega); res[3 * i + 0] = omega[0]; res[3 * i + 1] = omega[1]; res[3 * i + 2] = omega[2]; diff --git a/src/core/observables/ParticleBodyAngularVelocities.hpp b/src/core/observables/ParticleBodyAngularVelocities.hpp index 749fa22cc87..9e2b3c6b3e5 100644 --- a/src/core/observables/ParticleBodyAngularVelocities.hpp +++ b/src/core/observables/ParticleBodyAngularVelocities.hpp @@ -33,14 +33,17 @@ class ParticleBodyAngularVelocities : public PidObservable { using PidObservable::PidObservable; std::vector - evaluate(Utils::Span> particles, - const ParticleObservables::traits &traits) const override { + evaluate(ParticleReferenceRange particles, + const ParticleObservables::traits &) const override { std::vector res(n_values()); #ifdef ROTATION - for (std::size_t i = 0; i < particles.size(); i++) { - res[3 * i + 0] = particles[i].get().m.omega[0]; - res[3 * i + 1] = particles[i].get().m.omega[1]; - res[3 * i + 2] = particles[i].get().m.omega[2]; + std::size_t i = 0; + for (auto const &p : particles) { + auto const &omega = p.get().m.omega; + res[3 * i + 0] = omega[0]; + res[3 * i + 1] = omega[1]; + res[3 * i + 2] = omega[2]; + i++; } #endif return res; diff --git a/src/core/observables/ParticleBodyVelocities.hpp b/src/core/observables/ParticleBodyVelocities.hpp index 9de70465634..70775fa9cf2 100644 --- a/src/core/observables/ParticleBodyVelocities.hpp +++ b/src/core/observables/ParticleBodyVelocities.hpp @@ -35,7 +35,7 @@ class ParticleBodyVelocities : public 
PidObservable { using PidObservable::PidObservable; std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { std::vector res(n_values()); for (std::size_t i = 0; i < particles.size(); i++) { diff --git a/src/core/observables/ParticleDistances.hpp b/src/core/observables/ParticleDistances.hpp index 61002e4ef4e..fd7e0212b81 100644 --- a/src/core/observables/ParticleDistances.hpp +++ b/src/core/observables/ParticleDistances.hpp @@ -48,7 +48,7 @@ class ParticleDistances : public PidObservable { } std::vector - evaluate(Utils::Span> particles, + evaluate(ParticleReferenceRange particles, const ParticleObservables::traits &traits) const override { std::vector res(n_values()); diff --git a/src/core/observables/ParticleForces.hpp b/src/core/observables/ParticleForces.hpp index a6ed45788b2..36688e37774 100644 --- a/src/core/observables/ParticleForces.hpp +++ b/src/core/observables/ParticleForces.hpp @@ -21,6 +21,7 @@ #include "Particle.hpp" #include "PidObservable.hpp" + #include #include @@ -36,12 +37,15 @@ class ParticleForces : public PidObservable { std::vector evaluate(ParticleReferenceRange particles, - const ParticleObservables::traits &traits) const override { + const ParticleObservables::traits &) const override { std::vector res(n_values()); - for (std::size_t i = 0; i < particles.size(); i++) { - res[3 * i + 0] = particles[i].get().f.f[0]; - res[3 * i + 1] = particles[i].get().f.f[1]; - res[3 * i + 2] = particles[i].get().f.f[2]; + std::size_t i = 0; + for (auto const &p : particles) { + auto const &f = p.get().f.f; + res[3 * i + 0] = f[0]; + res[3 * i + 1] = f[1]; + res[3 * i + 2] = f[2]; + i++; } return res; }; diff --git a/src/core/observables/ParticleTraits.hpp b/src/core/observables/ParticleTraits.hpp index 0fb7aa82a92..ccc60cee2e0 100644 --- a/src/core/observables/ParticleTraits.hpp +++ b/src/core/observables/ParticleTraits.hpp @@ -29,17 +29,17 @@ namespace ParticleObservables { * of observables independent of the particle type. 
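[Editor's sketch] The traits<Particle> specialization below is what keeps observables agnostic of the concrete particle layout; note how it now forwards to the renamed accessors p.pos(), p.mass() and p.q(). A self-contained sketch of the same pattern with a hypothetical mock type shows why this is useful for testing:

    #include <array>

    // Observables written against traits<T> work for any T that provides a
    // specialization, so lightweight stand-ins can replace Particle in tests.
    namespace ParticleObservablesSketch {
    template <class T> struct traits;

    struct MockParticle {
      std::array<double, 3> r;
      double q;
    };

    template <> struct traits<MockParticle> {
      auto position(MockParticle const &p) const { return p.r; }
      auto charge(MockParticle const &p) const { return p.q; }
    };
    } // namespace ParticleObservablesSketch
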
*/ template <> struct traits { - auto position(Particle const &p) const { return p.r.p; } + auto position(Particle const &p) const { return p.pos(); } auto velocity(Particle const &p) const { return p.m.v; } auto mass(Particle const &p) const { #ifdef VIRTUAL_SITES // we exclude virtual particles since their mass does not have a meaning - if (p.p.is_virtual) - return decltype(p.p.mass){}; + if (p.is_virtual()) + return decltype(p.mass()){}; #endif - return p.p.mass; + return p.mass(); } - auto charge(Particle const &p) const { return p.p.q; } + auto charge(Particle const &p) const { return p.q(); } auto dipole_moment(Particle const &p) const { #if defined(ROTATION) && defined(DIPOLES) return p.calc_dip(); diff --git a/src/core/observables/PidObservable.hpp b/src/core/observables/PidObservable.hpp index 523940e68d4..5e9da6d6bf5 100644 --- a/src/core/observables/PidObservable.hpp +++ b/src/core/observables/PidObservable.hpp @@ -113,7 +113,7 @@ template class ParticleObservable : public PidObservable { std::vector evaluate(ParticleReferenceRange particles, - const ParticleObservables::traits &traits) const override { + const ParticleObservables::traits &) const override { std::vector res; Utils::flatten(ObsType{}(particles), std::back_inserter(res)); return res; diff --git a/src/core/observables/TotalForce.hpp b/src/core/observables/TotalForce.hpp index 81c062cdf61..a333c46b85e 100644 --- a/src/core/observables/TotalForce.hpp +++ b/src/core/observables/TotalForce.hpp @@ -32,16 +32,14 @@ class TotalForce : public PidObservable { std::vector evaluate(ParticleReferenceRange particles, - const ParticleObservables::traits &traits) const override { - std::vector res(n_values()); + const ParticleObservables::traits &) const override { + Utils::Vector3d res{}; for (auto const &p : particles) { if (p.get().p.is_virtual) continue; - res[0] += p.get().f.f[0]; - res[1] += p.get().f.f[1]; - res[2] += p.get().f.f[2]; + res += p.get().f.f; } - return res; + return res.as_vector(); } }; } // Namespace Observables diff --git a/src/core/observables/fetch_particles.hpp b/src/core/observables/fetch_particles.hpp index 500ad964a64..cf5200166dd 100644 --- a/src/core/observables/fetch_particles.hpp +++ b/src/core/observables/fetch_particles.hpp @@ -52,8 +52,8 @@ inline std::vector fetch_particles(std::vector const &ids) { particles.push_back(get_particle_data(id)); auto &p = particles.back(); - p.r.p += image_shift(p.l.i, box_geo.length()); - p.l.i = {}; + p.pos() += image_shift(p.image_box(), box_geo.length()); + p.image_box() = {}; } offset += this_size; diff --git a/src/core/pair_criteria/pair_criteria.hpp b/src/core/pair_criteria/pair_criteria.hpp index fde2c8b4a2b..7f249b95a7a 100644 --- a/src/core/pair_criteria/pair_criteria.hpp +++ b/src/core/pair_criteria/pair_criteria.hpp @@ -24,8 +24,6 @@ #include "grid.hpp" #include "particle_data.hpp" -#include - namespace PairCriteria { /** @brief Criterion which provides a true/false for a pair of particles */ class PairCriterion { @@ -51,8 +49,8 @@ class PairCriterion { class DistanceCriterion : public PairCriterion { public: bool decide(const Particle &p1, const Particle &p2) const override { - return box_geo.get_mi_vector(p1.r.p, p2.r.p).norm() <= m_cut_off; - }; + return box_geo.get_mi_vector(p1.pos(), p2.pos()).norm() <= m_cut_off; + } double get_cut_off() { return m_cut_off; } void set_cut_off(double c) { m_cut_off = c; } @@ -65,15 +63,15 @@ class EnergyCriterion : public PairCriterion { public: bool decide(const Particle &p1, const Particle &p2) const override 
{ // Distance between particles - auto const vec21 = box_geo.get_mi_vector(p1.r.p, p2.r.p); + auto const vec21 = box_geo.get_mi_vector(p1.pos(), p2.pos()); const double dist_betw_part = vec21.norm(); // Interaction parameters for particle types - IA_parameters const &ia_params = *get_ia_param(p1.p.type, p2.p.type); + IA_parameters const &ia_params = *get_ia_param(p1.type(), p2.type()); return (calc_non_bonded_pair_energy(p1, p2, ia_params, vec21, dist_betw_part)) >= m_cut_off; - }; + } double get_cut_off() { return m_cut_off; } void set_cut_off(double c) { m_cut_off = c; } @@ -87,8 +85,8 @@ class BondCriterion : public PairCriterion { bool decide(const Particle &p1, const Particle &p2) const override { return pair_bond_exists_on(p1.bonds(), p2.identity(), m_bond_type) || pair_bond_exists_on(p2.bonds(), p1.identity(), m_bond_type); - }; - int get_bond_type() { return m_bond_type; }; + } + int get_bond_type() { return m_bond_type; } void set_bond_type(int t) { m_bond_type = t; } private: diff --git a/src/core/particle_data.cpp b/src/core/particle_data.cpp index bd7a18140d8..501f1c98296 100644 --- a/src/core/particle_data.cpp +++ b/src/core/particle_data.cpp @@ -62,6 +62,13 @@ #include #include +constexpr auto some_tag = 42; +static bool type_list_enable; +static std::unordered_map> particle_type_map; + +void remove_id_from_map(int part_id, int type); +void add_id_to_type_map(int part_id, int type); + namespace { /** * @brief A generic particle update. @@ -176,22 +183,20 @@ using UpdateForceMessage = boost::variant * @brief Delete specific bond. */ struct RemoveBond { - std::vector bond; + std::vector bond; - void operator()(Particle &p) const { - assert(not bond.empty()); - auto const view = BondView(bond.front(), {bond.data() + 1, bond.size() - 1}); - auto it = boost::find(p.bonds(), view); + void operator()(Particle &p) const { + assert(not bond.empty()); + auto const view = BondView(bond.front(), {bond.data() + 1, bond.size() - 1}); + auto it = boost::find(p.bonds(), view); - if(it != p.bonds().end()) { - p.bonds().erase(it); - } + if (it != p.bonds().end()) { + p.bonds().erase(it); } + } - template - void serialize(Archive &ar, long int) { - ar & bond; - } + template + void serialize(Archive &ar, long int) { ar & bond; } }; /** @@ -221,28 +226,25 @@ struct RemovePairBondsTo { * @brief Delete all bonds. 
*/ struct RemoveBonds { - void operator()(Particle &p) const { - p.bonds().clear(); - } + void operator()(Particle &p) const { p.bonds().clear(); } - template - void serialize(Archive &ar, long int) { - } + template + void serialize(Archive &, long int) {} }; struct AddBond { - std::vector bond; + std::vector bond; - void operator()(Particle &p) const { - auto const view = BondView(bond.at(0), {bond.data() + 1, bond.size() - 1}); + void operator()(Particle &p) const { + auto const view = BondView(bond.at(0), {bond.data() + 1, bond.size() - 1}); - p.bonds().insert(view); - } + p.bonds().insert(view); + } - template - void serialize(Archive &ar, long int) { - ar & bond; - } + template + void serialize(Archive &ar, long int) { + ar & bond; + } }; using UpdateBondMessage = boost::variant @@ -253,17 +255,17 @@ using UpdateBondMessage = boost::variant #ifdef ROTATION struct UpdateOrientation { - Utils::Vector3d axis; - double angle; + Utils::Vector3d axis; + double angle; - void operator()(Particle &p) const { - local_rotate_particle(p, axis, angle); - } + void operator()(Particle &p) const { + local_rotate_particle(p, axis, angle); + } - template - void serialize(Archive &ar, long int) { - ar & axis & angle; - } + template + void serialize(Archive &ar, long int) { + ar & axis & angle; + } }; #endif @@ -356,7 +358,7 @@ void local_remove_pair_bonds_to(Particle &p, int other_pid) { void mpi_send_update_message_local(int node, int id) { if (node == comm_cart.rank()) { UpdateMessage msg{}; - comm_cart.recv(0, SOME_TAG, msg); + comm_cart.recv(0, some_tag, msg); boost::apply_visitor(UpdateVisitor{id}, msg); } @@ -392,7 +394,7 @@ void mpi_send_update_message(int id, const UpdateMessage &msg) { * message to the target, otherwise we * can just apply the update directly. 
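[Editor's sketch] The bond and property updates in this file are small function objects bundled into a boost::variant and executed with apply_visitor on the rank that owns the particle. A self-contained, simplified analogue of that dispatch (an int stands in for Particle, the operations are hypothetical):

    #include <boost/variant.hpp>

    #include <iostream>

    struct Increment {
      int by;
      void operator()(int &x) const { x += by; }
    };
    struct Reset {
      void operator()(int &x) const { x = 0; }
    };
    using Message = boost::variant<Increment, Reset>;

    // Visitor that applies whichever operation the variant currently holds.
    struct Apply : boost::static_visitor<void> {
      int &target;
      explicit Apply(int &t) : target(t) {}
      template <class Op> void operator()(Op const &op) const { op(target); }
    };

    int main() {
      int value = 1;
      Message msg = Increment{41}; // in the real code: received via comm_cart
      boost::apply_visitor(Apply{value}, msg);
      std::cout << value << '\n'; // prints 42
    }
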
*/ if (pnode != comm_cart.rank()) { - comm_cart.send(pnode, SOME_TAG, msg); + comm_cart.send(pnode, some_tag, msg); } else { boost::apply_visitor(UpdateVisitor{id}, msg); } @@ -412,14 +414,6 @@ void mpi_update_particle_property(int id, const T &value) { mpi_update_particle(id, value); } -/************************************************ - * variables - ************************************************/ -bool type_list_enable; -std::unordered_map> particle_type_map{}; -void remove_id_from_map(int part_id, int type); -void add_id_to_type_map(int part_id, int type); - /** * @brief id -> rank */ @@ -444,10 +438,9 @@ static void mpi_who_has_local() { sendbuf.resize(n_part); std::transform(local_particles.begin(), local_particles.end(), - sendbuf.begin(), - [](Particle const &p) { return p.p.identity; }); + sendbuf.begin(), [](Particle const &p) { return p.id(); }); - MPI_Send(sendbuf.data(), n_part, MPI_INT, 0, SOME_TAG, comm_cart); + MPI_Send(sendbuf.data(), n_part, MPI_INT, 0, some_tag, comm_cart); } REGISTER_CALLBACK(mpi_who_has_local) @@ -467,11 +460,11 @@ void mpi_who_has() { for (int pnode = 0; pnode < n_nodes; pnode++) { if (pnode == this_node) { for (auto const &p : local_particles) - particle_node[p.p.identity] = this_node; + particle_node[p.id()] = this_node; } else if (n_parts[pnode] > 0) { pdata.resize(n_parts[pnode]); - MPI_Recv(pdata.data(), n_parts[pnode], MPI_INT, pnode, SOME_TAG, + MPI_Recv(pdata.data(), n_parts[pnode], MPI_INT, pnode, some_tag, comm_cart, MPI_STATUS_IGNORE); for (int i = 0; i < n_parts[pnode]; i++) particle_node[pdata[i]] = pnode; @@ -518,7 +511,7 @@ std::size_t fetch_cache_max_size() { return particle_fetch_cache.max_size(); } boost::optional get_particle_data_local(int id) { auto p = cell_structure.get_local_particle(id); - if (p and (not p->l.ghost)) { + if (p and (not p->is_ghost())) { return *p; } @@ -559,7 +552,8 @@ static void mpi_get_particles_local() { return *cell_structure.get_local_particle(id); }); - Utils::Mpi::gatherv(comm_cart, parts.data(), parts.size(), 0); + Utils::Mpi::gatherv(comm_cart, parts.data(), static_cast(parts.size()), + 0); } REGISTER_CALLBACK(mpi_get_particles_local) @@ -609,8 +603,8 @@ std::vector mpi_get_particles(Utils::Span ids) { node_ids.cbegin(), node_ids.cend(), node_sizes.begin(), [](std::vector const &ids) { return static_cast(ids.size()); }); - Utils::Mpi::gatherv(comm_cart, parts.data(), parts.size(), parts.data(), - node_sizes.data(), 0); + Utils::Mpi::gatherv(comm_cart, parts.data(), static_cast(parts.size()), + parts.data(), node_sizes.data(), 0); return parts; } @@ -634,7 +628,7 @@ void prefetch_particle_data(Utils::Span in_ids) { /* Fetch the particles... 
*/ for (auto &p : mpi_get_particles(ids)) { - auto id = p.identity(); + auto id = p.id(); particle_fetch_cache.put(id, std::move(p)); } } @@ -655,16 +649,16 @@ Particle *local_place_particle(int id, const Utils::Vector3d &pos, int _new) { if (_new) { Particle new_part; - new_part.p.identity = id; - new_part.r.p = pp; - new_part.l.i = i; + new_part.id() = id; + new_part.pos() = pp; + new_part.image_box() = i; return cell_structure.add_local_particle(std::move(new_part)); } auto pt = cell_structure.get_local_particle(id); - pt->r.p = pp; - pt->l.i = i; + pt->pos() = pp; + pt->image_box() = i; return pt; } @@ -694,7 +688,7 @@ int mpi_place_new_particle(int p_id, const Utils::Vector3d &pos) { void mpi_place_particle_local(int pnode, int p_id) { if (pnode == this_node) { Utils::Vector3d pos; - comm_cart.recv(0, SOME_TAG, pos); + comm_cart.recv(0, some_tag, pos); local_place_particle(p_id, pos, 0); } @@ -716,7 +710,7 @@ void mpi_place_particle(int node, int p_id, const Utils::Vector3d &pos) { if (node == this_node) local_place_particle(p_id, pos, 0); else { - comm_cart.send(node, SOME_TAG, pos); + comm_cart.send(node, some_tag, pos); } cell_structure.set_resort_particles(Cells::RESORT_GLOBAL); @@ -805,7 +799,7 @@ void set_particle_virtual(int part, bool is_virtual) { #ifdef VIRTUAL_SITES_RELATIVE void set_particle_vs_quat(int part, Utils::Quaternion const &vs_relative_quat) { - auto vs_relative = get_particle_data(part).p.vs_relative; + auto vs_relative = get_particle_data(part).vs_relative(); vs_relative.quat = vs_relative_quat; mpi_update_particle_property< @@ -850,7 +844,7 @@ void set_particle_type(int p_id, int type) { // check if the particle exists already and the type is changed, then remove // it from the list which contains it auto const &cur_par = get_particle_data(p_id); - int prev_type = cur_par.p.type; + int prev_type = cur_par.type(); if (prev_type != type) { // particle existed before so delete it from the list remove_id_from_map(p_id, prev_type); @@ -996,7 +990,7 @@ int remove_particle(int p_id) { auto const &cur_par = get_particle_data(p_id); if (type_list_enable) { // remove particle from its current type_list - int type = cur_par.p.type; + int type = cur_par.type(); remove_id_from_map(p_id, type); } @@ -1014,16 +1008,16 @@ int remove_particle(int p_id) { void local_rescale_particles(int dir, double scale) { for (auto &p : cell_structure.local_particles()) { if (dir < 3) - p.r.p[dir] *= scale; + p.pos()[dir] *= scale; else { - p.r.p *= scale; + p.pos() *= scale; } } } static void mpi_rescale_particles_local(int dir) { double scale = 0.0; - MPI_Recv(&scale, 1, MPI_DOUBLE, 0, SOME_TAG, comm_cart, MPI_STATUS_IGNORE); + MPI_Recv(&scale, 1, MPI_DOUBLE, 0, some_tag, comm_cart, MPI_STATUS_IGNORE); local_rescale_particles(dir, scale); on_particle_change(); } @@ -1036,7 +1030,7 @@ void mpi_rescale_particles(int dir, double scale) { if (pnode == this_node) { local_rescale_particles(dir, scale); } else { - MPI_Send(&scale, 1, MPI_DOUBLE, pnode, SOME_TAG, comm_cart); + MPI_Send(&scale, 1, MPI_DOUBLE, pnode, some_tag, comm_cart); } } on_particle_change(); @@ -1115,7 +1109,7 @@ void auto_exclusions(int distance) { /* determine initial connectivity */ for (auto const &part1 : partCfg()) { - auto const p1 = part1.p.identity; + auto const p1 = part1.id(); for (auto const bond : part1.bonds()) { if ((bond.partner_ids().size() == 1) and (bond.partner_ids()[0] != p1)) { auto const p2 = bond.partner_ids()[0]; @@ -1130,7 +1124,7 @@ void auto_exclusions(int distance) { */ for (int count = 1; count < 
distance; count++) { for (auto const &p : partCfg()) { - auto const p1 = p.identity(); + auto const p1 = p.id(); for (int i = 0; i < partners[p1].size(); i += 2) { auto const p2 = partners[p1][i]; auto const dist1 = partners[p1][i + 1]; @@ -1174,7 +1168,7 @@ void init_type_map(int type) { map_for_type.clear(); for (auto const &p : partCfg()) { if (p.p.type == type) - map_for_type.insert(p.p.identity); + map_for_type.insert(p.id()); } } diff --git a/src/core/particle_data.hpp b/src/core/particle_data.hpp index afb4252afdc..ce61404536e 100644 --- a/src/core/particle_data.hpp +++ b/src/core/particle_data.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ -#ifndef _PARTICLE_DATA_H -#define _PARTICLE_DATA_H +#ifndef CORE_PARTICLE_DATA_HPP +#define CORE_PARTICLE_DATA_HPP /** \file * Particles and particle lists. * diff --git a/src/core/polymer.cpp b/src/core/polymer.cpp index 7629d0cc200..398bfda52b9 100644 --- a/src/core/polymer.cpp +++ b/src/core/polymer.cpp @@ -171,9 +171,8 @@ draw_polymer_positions(PartCfg &partCfg, int const n_polymers, /* Try up to max_tries times to draw a valid position */ auto draw_valid_monomer_position = [&](int p, int m) -> boost::optional<Utils::Vector3d> { - for (unsigned _ = 0; _ < max_tries; _++) { + for (unsigned i = 0; i < max_tries; i++) { auto const trial_pos = draw_monomer_position(p, m); - if (is_valid_pos(trial_pos)) { return trial_pos; } @@ -187,7 +186,8 @@ for (int attempts_poly = 0; attempts_poly < max_tries; attempts_poly++) { int rejections = 0; while (positions[p].size() < beads_per_chain) { - auto pos = draw_valid_monomer_position(p, positions[p].size()); + auto pos = draw_valid_monomer_position( + p, static_cast<int>(positions[p].size())); if (pos) { /* Move on one position */ diff --git a/src/core/pressure_inline.hpp b/src/core/pressure_inline.hpp index eef2ff4718f..5b5f18302c8 100644 --- a/src/core/pressure_inline.hpp +++ b/src/core/pressure_inline.hpp @@ -18,12 +18,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ -/** \file - * Pressure calculation. Really similar to energy.hpp. - */ - #ifndef CORE_PRESSURE_INLINE_HPP #define CORE_PRESSURE_INLINE_HPP +/** \file + * Pressure calculation. 
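 * (Editorial note, inferred from the inline helpers below rather than stated in the patch:
 * the non-bonded virial contribution per pair is the tensor product of the minimum-image
 * distance d and the pair force F, i.e. stress_kl = d_k * F_l, and the kinetic contribution
 * per particle is mass * v_k * v_l.)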
+ */ #include "config.hpp" @@ -62,12 +61,12 @@ inline void add_non_bonded_pair_virials(Particle const &p1, Particle const &p2, if (do_nonbonded(p1, p2)) #endif { - IA_parameters const &ia_params = *get_ia_param(p1.p.type, p2.p.type); + IA_parameters const &ia_params = *get_ia_param(p1.type(), p2.type()); auto const force = calc_non_bonded_pair_force(p1, p2, ia_params, d, dist).f; auto const stress = tensor_product(d, force); - auto const type1 = p1.p.mol_id; - auto const type2 = p2.p.mol_id; + auto const type1 = p1.mol_id(); + auto const type2 = p2.mol_id(); obs_pressure.add_non_bonded_contribution(type1, type2, flatten(stress)); } @@ -96,7 +95,7 @@ inline void add_non_bonded_pair_virials(Particle const &p1, Particle const &p2, boost::optional> calc_bonded_virial_pressure_tensor(Bonded_IA_Parameters const &iaparams, Particle const &p1, Particle const &p2) { - auto const dx = box_geo.get_mi_vector(p1.r.p, p2.r.p); + auto const dx = box_geo.get_mi_vector(p1.pos(), p2.pos()); auto const result = calc_bond_pair_force(p1, p2, iaparams, dx); if (result) { auto const &force = result.get(); @@ -117,8 +116,8 @@ calc_bonded_three_body_pressure_tensor(Bonded_IA_Parameters const &iaparams, (boost::get(&iaparams) != nullptr) || #endif (boost::get(&iaparams) != nullptr)) { - auto const dx21 = -box_geo.get_mi_vector(p1.r.p, p2.r.p); - auto const dx31 = box_geo.get_mi_vector(p3.r.p, p1.r.p); + auto const dx21 = -box_geo.get_mi_vector(p1.pos(), p2.pos()); + auto const dx31 = box_geo.get_mi_vector(p3.pos(), p1.pos()); auto const result = calc_bonded_three_body_force(iaparams, p1, p2, p3); if (result) { @@ -161,13 +160,13 @@ calc_bonded_pressure_tensor(Bonded_IA_Parameters const &iaparams, */ inline void add_kinetic_virials(Particle const &p1, Observable_stat &obs_pressure) { - if (p1.p.is_virtual) + if (p1.is_virtual()) return; /* kinetic pressure */ for (int k = 0; k < 3; k++) for (int l = 0; l < 3; l++) - obs_pressure.kinetic[k * 3 + l] += p1.m.v[k] * p1.m.v[l] * p1.p.mass; + obs_pressure.kinetic[k * 3 + l] += p1.v()[k] * p1.v()[l] * p1.mass(); } -#endif +#endif // CORE_PRESSURE_INLINE_HPP diff --git a/src/core/random.hpp b/src/core/random.hpp index 0ff4a000d0c..b2d1212c153 100644 --- a/src/core/random.hpp +++ b/src/core/random.hpp @@ -78,12 +78,12 @@ auto philox_4_uint64s(uint64_t counter, uint32_t seed, int key1, int key2 = 0) { using ctr_type = rng_type::ctr_type; using key_type = rng_type::key_type; - const ctr_type c{counter}; + const ctr_type c{{counter, 0u, 0u, 0u}}; auto const id1 = static_cast(key1); auto const id2 = static_cast(key2); - const key_type k{Utils::u32_to_u64(id1, id2), - Utils::u32_to_u64(static_cast(salt), seed)}; + const key_type k{{Utils::u32_to_u64(id1, id2), + Utils::u32_to_u64(static_cast(salt), seed)}}; return rng_type{}(c, k); } @@ -166,11 +166,13 @@ auto noise_gaussian(uint64_t counter, uint32_t seed, int key1, int key2 = 0) { // sin/cos are evaluated simultaneously by gcc or separately by Clang Utils::VectorXd noise{}; constexpr double two_pi = 2.0 * Utils::pi(); - auto const modulo = sqrt(-2.0 * log(u[0])); - auto const angle = two_pi * u[1]; - noise[0] = modulo * cos(angle); - if (N > 1) { - noise[1] = modulo * sin(angle); + { + auto const modulo = sqrt(-2.0 * log(u[0])); + auto const angle = two_pi * u[1]; + noise[0] = modulo * cos(angle); + if (N > 1) { + noise[1] = modulo * sin(angle); + } } if (N > 2) { auto const modulo = sqrt(-2.0 * log(u[2])); diff --git a/src/core/rattle.cpp b/src/core/rattle.cpp index e141405167c..052dc931bef 100644 --- a/src/core/rattle.cpp +++ 
b/src/core/rattle.cpp @@ -44,7 +44,7 @@ */ void save_old_position(const ParticleRange &particles, const ParticleRange &ghost_particles) { - auto save_pos = [](Particle &p) { p.r.p_last_timestep = p.r.p; }; + auto save_pos = [](Particle &p) { p.pos_last_time_step() = p.pos(); }; boost::for_each(particles, save_pos); boost::for_each(ghost_particles, save_pos); @@ -58,7 +58,7 @@ void save_old_position(const ParticleRange &particles, */ static void init_correction_vector(const ParticleRange &particles, const ParticleRange &ghost_particles) { - auto reset_force = [](Particle &p) { p.rattle.correction.fill(0); }; + auto reset_force = [](Particle &p) { p.rattle_params().correction.fill(0); }; boost::for_each(particles, reset_force); boost::for_each(ghost_particles, reset_force); @@ -74,19 +74,19 @@ static void init_correction_vector(const ParticleRange &particles, */ static bool calculate_positional_correction(RigidBond const &ia_params, Particle &p1, Particle &p2) { - auto const r_ij = box_geo.get_mi_vector(p1.r.p, p2.r.p); + auto const r_ij = box_geo.get_mi_vector(p1.pos(), p2.pos()); auto const r_ij2 = r_ij.norm2(); if (std::abs(1.0 - r_ij2 / ia_params.d2) > ia_params.p_tol) { auto const r_ij_t = - box_geo.get_mi_vector(p1.r.p_last_timestep, p2.r.p_last_timestep); + box_geo.get_mi_vector(p1.pos_last_time_step(), p2.pos_last_time_step()); auto const r_ij_dot = r_ij_t * r_ij; auto const G = - 0.50 * (ia_params.d2 - r_ij2) / r_ij_dot / (p1.p.mass + p2.p.mass); + 0.50 * (ia_params.d2 - r_ij2) / r_ij_dot / (p1.mass() + p2.mass()); auto const pos_corr = G * r_ij_t; - p1.rattle.correction += pos_corr * p2.p.mass; - p2.rattle.correction -= pos_corr * p1.p.mass; + p1.rattle_params().correction += pos_corr * p2.mass(); + p2.rattle_params().correction -= pos_corr * p1.mass(); return true; } @@ -128,8 +128,8 @@ static bool compute_correction_vector(CellStructure &cs, Kernel kernel) { */ static void apply_positional_correction(const ParticleRange &particles) { boost::for_each(particles, [](Particle &p) { - p.r.p += p.rattle.correction; - p.m.v += p.rattle.correction; + p.pos() += p.rattle_params().correction; + p.v() += p.rattle_params().correction; }); } @@ -177,17 +177,17 @@ void correct_position_shake(CellStructure &cs) { */ static bool calculate_velocity_correction(RigidBond const &ia_params, Particle &p1, Particle &p2) { - auto const v_ij = p1.m.v - p2.m.v; - auto const r_ij = box_geo.get_mi_vector(p1.r.p, p2.r.p); + auto const v_ij = p1.v() - p2.v(); + auto const r_ij = box_geo.get_mi_vector(p1.pos(), p2.pos()); auto const v_proj = v_ij * r_ij; if (std::abs(v_proj) > ia_params.v_tol) { - auto const K = v_proj / ia_params.d2 / (p1.p.mass + p2.p.mass); + auto const K = v_proj / ia_params.d2 / (p1.mass() + p2.mass()); auto const vel_corr = K * r_ij; - p1.rattle.correction -= vel_corr * p2.p.mass; - p2.rattle.correction += vel_corr * p1.p.mass; + p1.rattle_params().correction -= vel_corr * p2.mass(); + p2.rattle_params().correction += vel_corr * p1.mass(); return true; } @@ -201,7 +201,8 @@ static bool calculate_velocity_correction(RigidBond const &ia_params, * @param particles particle range */ static void apply_velocity_correction(const ParticleRange &particles) { - boost::for_each(particles, [](Particle &p) { p.m.v += p.rattle.correction; }); + boost::for_each(particles, + [](Particle &p) { p.v() += p.rattle_params().correction; }); } void correct_velocity_shake(CellStructure &cs) { diff --git a/src/core/rattle.hpp b/src/core/rattle.hpp index a5dd83b2e62..3cea6c97a79 100644 --- a/src/core/rattle.hpp 
+++ b/src/core/rattle.hpp @@ -33,9 +33,9 @@ #include "CellStructure.hpp" -/** Transfer the current particle positions from @ref ParticlePosition::p - * "Particle::r::p" to @ref ParticlePosition::p_last_timestep - * "Particle::r::p_last_timestep" +/** Transfer the current particle positions from @ref Particle::pos + * "Particle::pos" to @ref Particle::pos_last_time_step + * "Particle::pos_last_time_step" */ void save_old_position(const ParticleRange &particles, const ParticleRange &ghost_particles); diff --git a/src/core/reaction_methods/ConstantpHEnsemble.hpp b/src/core/reaction_methods/ConstantpHEnsemble.hpp index b533999cfa9..0fe32c23980 100644 --- a/src/core/reaction_methods/ConstantpHEnsemble.hpp +++ b/src/core/reaction_methods/ConstantpHEnsemble.hpp @@ -40,8 +40,11 @@ namespace ReactionMethods { */ class ConstantpHEnsemble : public ReactionAlgorithm { public: - ConstantpHEnsemble(int seed) : ReactionAlgorithm(seed) {} - double m_constant_pH = -10; + ConstantpHEnsemble(int seed, double kT, double exclusion_radius, + double constant_pH) + : ReactionAlgorithm(seed, kT, exclusion_radius), + m_constant_pH(constant_pH) {} + double m_constant_pH; protected: double calculate_acceptance_probability( diff --git a/src/core/reaction_methods/ReactionAlgorithm.cpp b/src/core/reaction_methods/ReactionAlgorithm.cpp index 925baab8523..d6bea4bc8b6 100644 --- a/src/core/reaction_methods/ReactionAlgorithm.cpp +++ b/src/core/reaction_methods/ReactionAlgorithm.cpp @@ -55,7 +55,7 @@ int ReactionAlgorithm::do_reaction(int reaction_steps) { auto current_E_pot = calculate_current_potential_energy_of_system(); for (int i = 0; i < reaction_steps; i++) { int reaction_id = i_random(static_cast<int>(reactions.size())); - generic_oneway_reaction(reactions[reaction_id], current_E_pot); + generic_oneway_reaction(*reactions[reaction_id], current_E_pot); } return 0; } @@ -64,17 +64,12 @@ * Adds a reaction to the reaction system */ void ReactionAlgorithm::add_reaction( - double gamma, const std::vector<int> &reactant_types, - const std::vector<int> &reactant_coefficients, - const std::vector<int> &product_types, - const std::vector<int> &product_coefficients) { - SingleReaction new_reaction(gamma, reactant_types, reactant_coefficients, - product_types, product_coefficients); + std::shared_ptr<SingleReaction> const &new_reaction) { // make ESPResSo count the particle numbers which take part in the reactions - for (int reactant_type : new_reaction.reactant_types) + for (int reactant_type : new_reaction->reactant_types) init_type_map(reactant_type); - for (int product_type : new_reaction.product_types) + for (int product_type : new_reaction->product_types) init_type_map(product_type); init_type_map(non_interacting_type); @@ -91,20 +86,13 @@ void ReactionAlgorithm::check_reaction_method() const { throw std::runtime_error("Reaction system not initialized"); } - if (kT < 0) { - throw std::runtime_error("kT cannot be negative," - "normally it should be 1.0. 
This will be used" "directly to calculate beta:=1/(k_B T) which" "occurs in the exp(-beta*E)\n"); - } - #ifdef ELECTROSTATICS // check for the existence of default charges for all types that take part in // the reactions for (const auto &current_reaction : reactions) { // check for reactants - for (int reactant_type : current_reaction.reactant_types) { + for (int reactant_type : current_reaction->reactant_types) { auto it = charges_of_types.find(reactant_type); if (it == charges_of_types.end()) { std::string message = std::string("Forgot to assign charge to type ") + @@ -113,7 +101,7 @@ } } // check for products - for (int product_type : current_reaction.product_types) { + for (int product_type : current_reaction->product_types) { auto it = charges_of_types.find(product_type); if (it == charges_of_types.end()) { std::string message = std::string("Forgot to assign charge to type ") + @@ -127,12 +115,9 @@ /** * Automatically sets the volume which is used by the reaction ensemble to the - * volume of a cuboid box, if not already initialized with another value. + * volume of a cuboid box. */ -void ReactionAlgorithm::set_cuboid_reaction_ensemble_volume() { - if (volume < 0) - volume = box_geo.volume(); -} +void ReactionAlgorithm::update_volume() { volume = box_geo.volume(); } /** * Checks whether all particles exist for the provided reaction. @@ -250,8 +235,7 @@ ReactionAlgorithm::make_reaction_attempt( * when a reaction attempt is rejected. */ void ReactionAlgorithm::restore_properties( - std::vector<StoredParticleProperty> const &property_list, - const int number_of_saved_properties) { + std::vector<StoredParticleProperty> const &property_list) { // this function restores all properties of all particles provided in the // property list, the format of the property list is (p_id,charge,type) // repeated for each particle that occurs in that list @@ -303,9 +287,6 @@ void ReactionAlgorithm::generic_oneway_reaction( std::vector<int> p_ids_created_particles; std::vector<StoredParticleProperty> hidden_particles_properties; std::vector<StoredParticleProperty> changed_particles_properties; - // save p_id, charge and type of the reactant particle, only thing we - // need to hide the particle and recover it - const int number_of_saved_properties = 3; std::tie(changed_particles_properties, p_ids_created_particles, hidden_particles_properties) = @@ -353,10 +334,9 @@ delete_particle(p_ids_created_particle); } // 2) restore previously hidden reactant particles - restore_properties(hidden_particles_properties, number_of_saved_properties); + restore_properties(hidden_particles_properties); // 3) restore previously changed reactant particles - restore_properties(changed_particles_properties, - number_of_saved_properties); + restore_properties(changed_particles_properties); } } @@ -413,7 +393,7 @@ void ReactionAlgorithm::check_exclusion_radius(int p_id) { * delete unbonded particles since bonds are coupled to ids. This is used to * avoid the id range becoming excessively huge. 
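 * (Illustrative usage added for clarity, not part of the patch; `algo` stands for a
 * hypothetical ReactionAlgorithm instance and the behaviour follows the branches below:
 *   algo.delete_particle(get_maximal_particle_id());     // highest id: plain removal
 *   algo.delete_particle(get_maximal_particle_id() + 1); // throws std::runtime_error
 * )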
*/ -int ReactionAlgorithm::delete_particle(int p_id) { +void ReactionAlgorithm::delete_particle(int p_id) { auto const old_max_seen_id = get_maximal_particle_id(); if (p_id == old_max_seen_id) { // last particle, just delete @@ -435,7 +415,6 @@ int ReactionAlgorithm::delete_particle(int p_id) { throw std::runtime_error( "Particle id is greater than the max seen particle id"); } - return 0; } void ReactionAlgorithm::set_cyl_constraint(double center_x, double center_y, @@ -533,7 +512,6 @@ void ReactionAlgorithm::move_particle(int p_id, Utils::Vector3d const &new_pos, std::vector> ReactionAlgorithm::generate_new_particle_positions(int type, int n_particles) { - std::vector p_id_s_changed_particles; std::vector> old_positions; old_positions.reserve(n_particles); diff --git a/src/core/reaction_methods/ReactionAlgorithm.hpp b/src/core/reaction_methods/ReactionAlgorithm.hpp index 0253a1eb523..a089cdae3d5 100644 --- a/src/core/reaction_methods/ReactionAlgorithm.hpp +++ b/src/core/reaction_methods/ReactionAlgorithm.hpp @@ -28,7 +28,9 @@ #include #include +#include #include +#include #include #include #include @@ -45,24 +47,33 @@ struct StoredParticleProperty { class ReactionAlgorithm { public: - ReactionAlgorithm(int seed) + ReactionAlgorithm(int seed, double kT, double exclusion_radius) : m_generator(Random::mt19937(std::seed_seq({seed, seed, seed}))), m_normal_distribution(0.0, 1.0), m_uniform_real_distribution(0.0, 1.0) { + if (kT < 0.) { + throw std::domain_error("Invalid value for 'kT'"); + } + if (exclusion_radius < 0.) { + throw std::domain_error("Invalid value for 'exclusion_radius'"); + } + this->kT = kT; + this->exclusion_radius = exclusion_radius; + update_volume(); } virtual ~ReactionAlgorithm() = default; - std::vector reactions; + std::vector> reactions; std::map charges_of_types; - double kT = -10.0; + double kT; /** * Hard sphere radius. If particles are closer than this value, * it is assumed that their interaction energy gets approximately * infinite, therefore these configurations do not contribute * to the partition function and ensemble averages. */ - double exclusion_radius = 0.0; - double volume = -10.0; + double exclusion_radius; + double volume; int non_interacting_type = 100; int m_accepted_configurational_MC_moves = 0; @@ -72,21 +83,30 @@ class ReactionAlgorithm { static_cast(m_tried_configurational_MC_moves); } - void set_cuboid_reaction_ensemble_volume(); + auto get_kT() const { return kT; } + auto get_exclusion_radius() const { return exclusion_radius; } + auto get_volume() const { return volume; } + void set_volume(double new_volume) { + if (new_volume <= 0.) 
{ + throw std::domain_error("Invalid value for 'volume'"); + } + volume = new_volume; + } + void update_volume(); virtual int do_reaction(int reaction_steps); void check_reaction_method() const; void remove_constraint() { m_reaction_constraint = ReactionConstraint::NONE; } void set_cyl_constraint(double center_x, double center_y, double radius); void set_slab_constraint(double slab_start_z, double slab_end_z); Utils::Vector2d get_slab_constraint_parameters() const { + if (m_reaction_constraint != ReactionConstraint::SLAB_Z) { + throw std::runtime_error("no slab constraint is currently active"); + } return {m_slab_start_z, m_slab_end_z}; } - int delete_particle(int p_id); - void add_reaction(double gamma, const std::vector &reactant_types, - const std::vector &reactant_coefficients, - const std::vector &product_types, - const std::vector &product_coefficients); + void delete_particle(int p_id); + void add_reaction(std::shared_ptr const &new_reaction); void delete_reaction(int reaction_id) { reactions.erase(reactions.begin() + reaction_id); } @@ -125,8 +145,7 @@ class ReactionAlgorithm { std::vector> generate_new_particle_positions(int type, int n_particles); void - restore_properties(std::vector const &property_list, - int number_of_saved_properties); + restore_properties(std::vector const &property_list); /** * @brief draws a random integer from the uniform distribution in the range @@ -142,10 +161,10 @@ class ReactionAlgorithm { all_reactant_particles_exist(SingleReaction const ¤t_reaction) const; protected: - virtual double calculate_acceptance_probability( - SingleReaction const ¤t_reaction, double E_pot_old, - double E_pot_new, std::map const &old_particle_numbers) const { - return -10; + virtual double + calculate_acceptance_probability(SingleReaction const &, double, double, + std::map const &) const { + return -10.; } private: diff --git a/src/core/reaction_methods/ReactionEnsemble.hpp b/src/core/reaction_methods/ReactionEnsemble.hpp index c484e7176cd..4bfa2dcef6c 100644 --- a/src/core/reaction_methods/ReactionEnsemble.hpp +++ b/src/core/reaction_methods/ReactionEnsemble.hpp @@ -36,7 +36,8 @@ namespace ReactionMethods { */ class ReactionEnsemble : public ReactionAlgorithm { public: - ReactionEnsemble(int seed) : ReactionAlgorithm(seed) {} + ReactionEnsemble(int seed, double kT, double exclusion_radius) + : ReactionAlgorithm(seed, kT, exclusion_radius) {} protected: double calculate_acceptance_probability( diff --git a/src/core/reaction_methods/SingleReaction.hpp b/src/core/reaction_methods/SingleReaction.hpp index ffc89835db2..5bd1a75da49 100644 --- a/src/core/reaction_methods/SingleReaction.hpp +++ b/src/core/reaction_methods/SingleReaction.hpp @@ -32,6 +32,14 @@ struct SingleReaction { std::vector const &reactant_coefficients, std::vector const &product_types, std::vector const &product_coefficients) { + if (reactant_types.size() != reactant_coefficients.size()) { + throw std::invalid_argument( + "reactants: number of types and coefficients have to match"); + } + if (product_types.size() != product_coefficients.size()) { + throw std::invalid_argument( + "products: number of types and coefficients have to match"); + } this->reactant_types = reactant_types; this->reactant_coefficients = reactant_coefficients; this->product_types = product_types; diff --git a/src/core/reaction_methods/WidomInsertion.cpp b/src/core/reaction_methods/WidomInsertion.cpp index d8ca7342b2c..c0680182bdc 100644 --- a/src/core/reaction_methods/WidomInsertion.cpp +++ 
b/src/core/reaction_methods/WidomInsertion.cpp @@ -42,11 +42,6 @@ double WidomInsertion::calculate_particle_insertion_potential_energy( std::vector<int> p_ids_created_particles; std::vector<StoredParticleProperty> hidden_particles_properties; std::vector<StoredParticleProperty> changed_particles_properties; - - // save p_id, charge and type of the reactant particle, only thing we - // need to hide the particle and recover it - auto constexpr number_of_saved_properties = 3; - std::tie(changed_particles_properties, p_ids_created_particles, hidden_particles_properties) = make_reaction_attempt(current_reaction); @@ -59,9 +54,9 @@ delete_particle(p_ids_created_particle); } // 2) restore previously hidden reactant particles - restore_properties(hidden_particles_properties, number_of_saved_properties); + restore_properties(hidden_particles_properties); // 3) restore previously changed reactant particles - restore_properties(changed_particles_properties, number_of_saved_properties); + restore_properties(changed_particles_properties); // calculate the particle insertion potential energy auto const E_pot_insertion = E_pot_new - E_pot_old; diff --git a/src/core/reaction_methods/WidomInsertion.hpp b/src/core/reaction_methods/WidomInsertion.hpp index e455a045aa0..5196f950245 100644 --- a/src/core/reaction_methods/WidomInsertion.hpp +++ b/src/core/reaction_methods/WidomInsertion.hpp @@ -28,7 +28,8 @@ namespace ReactionMethods { /** Widom insertion method */ class WidomInsertion : public ReactionAlgorithm { public: - WidomInsertion(int seed) : ReactionAlgorithm(seed) {} + WidomInsertion(int seed, double kT, double exclusion_radius) + : ReactionAlgorithm(seed, kT, exclusion_radius) {} double calculate_particle_insertion_potential_energy( SingleReaction &current_reaction); }; diff --git a/src/core/reaction_methods/tests/ConstantpHEnsemble_test.cpp b/src/core/reaction_methods/tests/ConstantpHEnsemble_test.cpp index d110205fcaa..ec9a6eb930d 100644 --- a/src/core/reaction_methods/tests/ConstantpHEnsemble_test.cpp +++ b/src/core/reaction_methods/tests/ConstantpHEnsemble_test.cpp @@ -50,9 +50,7 @@ BOOST_AUTO_TEST_CASE(ConstantpHEnsemble_test) { }; constexpr double tol = 100 * std::numeric_limits<double>::epsilon(); - ConstantpHEnsembleTest r_algo(42); - r_algo.kT = 20.; - r_algo.m_constant_pH = 1.; + ConstantpHEnsembleTest r_algo(42, 20., 0., 1.); // exception if no reaction was added BOOST_CHECK_THROW(r_algo.check_reaction_method(), std::runtime_error); diff --git a/src/core/reaction_methods/tests/ReactionAlgorithm_test.cpp b/src/core/reaction_methods/tests/ReactionAlgorithm_test.cpp index da8c2f9d880..2c6fd66e3ee 100644 --- a/src/core/reaction_methods/tests/ReactionAlgorithm_test.cpp +++ b/src/core/reaction_methods/tests/ReactionAlgorithm_test.cpp @@ -59,7 +59,7 @@ BOOST_AUTO_TEST_CASE(ReactionAlgorithm_test) { constexpr double tol = 100 * std::numeric_limits<double>::epsilon(); // check acceptance rate - ReactionAlgorithmTest r_algo(42); + ReactionAlgorithmTest r_algo(42, 1., 0.); for (int tried_moves = 1; tried_moves < 5; ++tried_moves) { for (int accepted_moves = 0; accepted_moves < 5; ++accepted_moves) { r_algo.m_tried_configurational_MC_moves = tried_moves; @@ -87,11 +87,11 @@ // check reaction addition { - r_algo.add_reaction(reaction.gamma, reaction.reactant_types, - reaction.reactant_coefficients, reaction.product_types, - reaction.product_coefficients); + r_algo.add_reaction(std::make_shared<SingleReaction>( + reaction.gamma, reaction.reactant_types, 
reaction.reactant_coefficients, + reaction.product_types, reaction.product_coefficients)); BOOST_REQUIRE_EQUAL(r_algo.reactions.size(), 1ul); - auto const &value = r_algo.reactions[0]; + auto const &value = *r_algo.reactions[0]; BOOST_TEST(value.reactant_types == reaction.reactant_types, boost::test_tools::per_element()); BOOST_TEST(value.reactant_coefficients == reaction.reactant_coefficients, @@ -110,12 +110,6 @@ BOOST_CHECK_EQUAL(probability, -10.); } - // exception if kT is negative - BOOST_CHECK_THROW(r_algo.check_reaction_method(), std::runtime_error); - - // set kT - r_algo.kT = 1.; - #ifdef ELECTROSTATICS // exception if reactant types have no charge information BOOST_CHECK_THROW(r_algo.check_reaction_method(), std::runtime_error); @@ -131,16 +125,16 @@ // check reaction removal { - SingleReaction const new_reaction(5., {type_B}, {1}, {type_C}, {1}); - r_algo.add_reaction(new_reaction.gamma, new_reaction.reactant_types, - new_reaction.reactant_coefficients, - new_reaction.product_types, - new_reaction.product_coefficients); + auto const new_gamma = 5.; + auto const new_reaction = std::make_shared<SingleReaction>( + new_gamma, std::vector<int>{type_B}, std::vector<int>{1}, + std::vector<int>{type_C}, std::vector<int>{1}); + r_algo.add_reaction(new_reaction); BOOST_REQUIRE_EQUAL(r_algo.reactions.size(), 2ul); - BOOST_CHECK_EQUAL(r_algo.reactions[1].gamma, new_reaction.gamma); + BOOST_CHECK_EQUAL(r_algo.reactions[1]->gamma, new_gamma); r_algo.delete_reaction(1); BOOST_REQUIRE_EQUAL(r_algo.reactions.size(), 1ul); - BOOST_CHECK_EQUAL(r_algo.reactions[0].gamma, reaction.gamma); + BOOST_CHECK_EQUAL(r_algo.reactions[0]->gamma, reaction.gamma); r_algo.delete_reaction(0); BOOST_REQUIRE_EQUAL(r_algo.reactions.size(), 0ul); } diff --git a/src/core/reaction_methods/tests/ReactionEnsemble_test.cpp b/src/core/reaction_methods/tests/ReactionEnsemble_test.cpp index 436fc3fa779..d115fb424e8 100644 --- a/src/core/reaction_methods/tests/ReactionEnsemble_test.cpp +++ b/src/core/reaction_methods/tests/ReactionEnsemble_test.cpp @@ -57,13 +57,12 @@ BOOST_AUTO_TEST_CASE(ReactionEnsemble_test) { using ReactionEnsemble::generic_oneway_reaction; using ReactionEnsemble::ReactionEnsemble; }; - constexpr double tol = 100 * std::numeric_limits<double>::epsilon(); + auto constexpr tol = 100 * std::numeric_limits<double>::epsilon(); // check basic interface { - ReactionEnsembleTest r_algo(42); - r_algo.volume = 10.; - r_algo.kT = 20.; + ReactionEnsembleTest r_algo(42, 20., 0.); + r_algo.set_volume(10.); // exception if no reaction was added BOOST_CHECK_THROW(r_algo.check_reaction_method(), std::runtime_error); @@ -75,7 +74,6 @@ SingleReaction const reaction(2., {type_A}, {1}, {type_B, type_C}, {3, 4}); // check acceptance probability - constexpr auto g = factorial_Ni0_divided_by_factorial_Ni0_plus_nu_i; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { @@ -99,10 +97,8 @@ // check that the system energy is updated after a successful reaction { - ReactionEnsembleTest test_reaction(42); - test_reaction.volume = 1.; - test_reaction.kT = 1.; - test_reaction.exclusion_radius = 0; + ReactionEnsembleTest test_reaction(42, 1., 0.); + test_reaction.set_volume(1.); // create a generic identity exchange reaction D <-> E int const type_D = 0; diff --git a/src/core/reaction_methods/tests/particle_tracking_test.cpp 
b/src/core/reaction_methods/tests/particle_tracking_test.cpp index cdffe2cef92..fdf80b802eb 100644 --- a/src/core/reaction_methods/tests/particle_tracking_test.cpp +++ b/src/core/reaction_methods/tests/particle_tracking_test.cpp @@ -32,15 +32,12 @@ #include -#include #include #include // Check the mechanism that tracks particles of a certain type and the // function that selects a random particle in the pool of tracked particles. BOOST_FIXTURE_TEST_CASE(particle_type_map_test, ParticleFactory) { - constexpr double tol = 100 * std::numeric_limits::epsilon(); - // particle properties int const type = 10; int const pid = 1; diff --git a/src/core/rotate_system.cpp b/src/core/rotate_system.cpp index f55dd95deb9..79800122e0a 100644 --- a/src/core/rotate_system.cpp +++ b/src/core/rotate_system.cpp @@ -42,9 +42,9 @@ static void mpi_rotate_system_local(double phi, double theta, double alpha) { double local_mass = 0.0; for (auto const &p : particles) { - if (not p.p.is_virtual) { - local_com += p.p.mass * p.r.p; - local_mass += p.p.mass; + if (not p.is_virtual()) { + local_com += p.mass() * p.pos(); + local_mass += p.mass(); } } @@ -62,7 +62,7 @@ static void mpi_rotate_system_local(double phi, double theta, double alpha) { // Rotate particle coordinates for (auto &p : particles) { // Move the center of mass of the system to the origin - p.r.p = com + Utils::vec_rotate(axis, alpha, p.r.p - com); + p.pos() = com + Utils::vec_rotate(axis, alpha, p.pos() - com); #ifdef ROTATION local_rotate_particle(p, axis, alpha); #endif diff --git a/src/core/statistics.cpp b/src/core/statistics.cpp index 5ad04741ed5..b6563441352 100644 --- a/src/core/statistics.cpp +++ b/src/core/statistics.cpp @@ -58,9 +58,9 @@ double mindist(PartCfg &partCfg, const std::vector &set1, for (auto jt = partCfg.begin(); jt != partCfg.end(); ++jt) { /* check which sets particle j belongs to (bit 0: set1, bit1: set2) */ auto in_set = 0u; - if (set1.empty() || contains(set1, jt->p.type)) + if (set1.empty() || contains(set1, jt->type())) in_set = 1u; - if (set2.empty() || contains(set2, jt->p.type)) + if (set2.empty() || contains(set2, jt->type())) in_set |= 2u; if (in_set == 0) continue; @@ -68,10 +68,10 @@ double mindist(PartCfg &partCfg, const std::vector &set1, for (auto it = std::next(jt); it != partCfg.end(); ++it) /* accept a pair if particle j is in set1 and particle i in set2 or vice * versa. 
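 * (Worked example added for clarity, not in the original: with set1 = {0} and set2 = {1},
 * a particle j of type 0 gets in_set = 1u, so the pair (j, i) is counted only when particle
 * i has type 1; a particle of type 2 gets in_set = 0u and is skipped by the `continue`
 * above.)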
*/ - if (((in_set & 1u) && (set2.empty() || contains(set2, it->p.type))) || - ((in_set & 2u) && (set1.empty() || contains(set1, it->p.type)))) - mindist2 = - std::min(mindist2, box_geo.get_mi_vector(jt->r.p, it->r.p).norm2()); + if (((in_set & 1u) && (set2.empty() || contains(set2, it->type()))) || + ((in_set & 2u) && (set1.empty() || contains(set1, it->type())))) + mindist2 = std::min( + mindist2, box_geo.get_mi_vector(jt->pos(), it->pos()).norm2()); } return std::sqrt(mindist2); @@ -82,7 +82,7 @@ static Utils::Vector3d mpi_particle_momentum_local() { auto const momentum = std::accumulate(particles.begin(), particles.end(), Utils::Vector3d{}, [](Utils::Vector3d &m, Particle const &p) { - return m + p.p.mass * p.m.v; + return m + p.mass() * p.v(); }); return momentum; @@ -110,10 +110,10 @@ Utils::Vector3d centerofmass(PartCfg &partCfg, int type) { double mass = 0.0; for (auto const &p : partCfg) { - if ((p.p.type == type) || (type == -1)) - if (not p.p.is_virtual) { - com += p.r.p * p.p.mass; - mass += p.p.mass; + if ((p.type() == type) || (type == -1)) + if (not p.is_virtual()) { + com += p.pos() * p.mass(); + mass += p.mass(); } } com /= mass; @@ -124,9 +124,9 @@ Utils::Vector3d angularmomentum(PartCfg &partCfg, int type) { Utils::Vector3d am{}; for (auto const &p : partCfg) { - if ((p.p.type == type) || (type == -1)) - if (not p.p.is_virtual) { - am += p.p.mass * vector_product(p.r.p, p.m.v); + if ((p.type() == type) || (type == -1)) + if (not p.is_virtual()) { + am += p.mass() * vector_product(p.pos(), p.v()); } } return am; @@ -143,10 +143,10 @@ void momentofinertiamatrix(PartCfg &partCfg, int type, double *MofImatrix) { auto const com = centerofmass(partCfg, type); for (auto const &p : partCfg) { - if (type == p.p.type and (not p.p.is_virtual)) { + if (type == p.type() and (not p.is_virtual())) { count++; - p1 = p.r.p - com; - massi = p.p.mass; + p1 = p.pos() - com; + massi = p.mass(); MofImatrix[0] += massi * (p1[1] * p1[1] + p1[2] * p1[2]); MofImatrix[4] += massi * (p1[0] * p1[0] + p1[2] * p1[2]); MofImatrix[8] += massi * (p1[0] * p1[0] + p1[1] * p1[1]); @@ -172,16 +172,16 @@ std::vector nbhood(PartCfg &partCfg, const Utils::Vector3d &pos, for (auto const &p : partCfg) { if ((planedims[0] + planedims[1] + planedims[2]) == 3) { - d = box_geo.get_mi_vector(pt, p.r.p); + d = box_geo.get_mi_vector(pt, p.pos()); } else { /* Calculate the in plane distance */ for (int j = 0; j < 3; j++) { - d[j] = planedims[j] * (p.r.p[j] - pt[j]); + d[j] = planedims[j] * (p.pos()[j] - pt[j]); } } if (d.norm2() < r2) { - ids.push_back(p.p.identity); + ids.push_back(p.id()); } } @@ -192,8 +192,9 @@ double distto(PartCfg &partCfg, const Utils::Vector3d &pos, int pid) { auto mindist = std::numeric_limits::infinity(); for (auto const &part : partCfg) { - if (pid != part.p.identity) { - auto const d = box_geo.get_mi_vector({pos[0], pos[1], pos[2]}, part.r.p); + if (pid != part.id()) { + auto const d = + box_geo.get_mi_vector({pos[0], pos[1], pos[2]}, part.pos()); mindist = std::min(mindist, d.norm2()); } } @@ -222,15 +223,15 @@ void calc_part_distribution(PartCfg &partCfg, std::vector const &p1_types, /* particle loop: p1_types */ for (auto const &p1 : partCfg) { for (int t1 : p1_types) { - if (p1.p.type == t1) { + if (p1.type() == t1) { min_dist2 = start_dist2; /* particle loop: p2_types */ for (auto const &p2 : partCfg) { if (p1 != p2) { for (int t2 : p2_types) { - if (p2.p.type == t2) { + if (p2.type() == t2) { auto const act_dist2 = - box_geo.get_mi_vector(p1.r.p, p2.r.p).norm2(); + 
box_geo.get_mi_vector(p1.pos(), p2.pos()).norm2(); if (act_dist2 < min_dist2) { min_dist2 = act_dist2; } @@ -273,7 +274,7 @@ void calc_structurefactor(PartCfg &partCfg, std::vector const &p_types, if (order < 1) throw std::domain_error("order has to be a strictly positive number"); - auto const order_sq = order * order; + auto const order_sq = Utils::sqr(static_cast(order)); std::vector ff(2 * order_sq + 1); auto const twoPI_L = 2 * Utils::pi() * box_geo.length_inv()[0]; @@ -281,12 +282,13 @@ void calc_structurefactor(PartCfg &partCfg, std::vector const &p_types, for (int j = -order; j <= order; j++) { for (int k = -order; k <= order; k++) { auto const n = i * i + j * j + k * k; - if ((n <= order_sq) && (n >= 1)) { + if ((static_cast(n) <= order_sq) && (n >= 1)) { double C_sum = 0.0, S_sum = 0.0; for (auto const &p : partCfg) { for (int t : p_types) { - if (p.p.type == t) { - auto const qr = twoPI_L * (Utils::Vector3i{{i, j, k}} * p.r.p); + if (p.type() == t) { + auto const qr = + twoPI_L * (Utils::Vector3i{{i, j, k}} * p.pos()); C_sum += cos(qr); S_sum += sin(qr); } @@ -302,13 +304,13 @@ void calc_structurefactor(PartCfg &partCfg, std::vector const &p_types, long n_particles = 0l; for (auto const &p : partCfg) { for (int t : p_types) { - if (p.p.type == t) + if (p.type() == t) n_particles++; } } int length = 0; - for (int qi = 0; qi < order_sq; qi++) { + for (std::size_t qi = 0; qi < order_sq; qi++) { if (ff[2 * qi + 1] != 0) { ff[2 * qi] /= static_cast(n_particles) * ff[2 * qi + 1]; length++; @@ -319,9 +321,9 @@ void calc_structurefactor(PartCfg &partCfg, std::vector const &p_types, intensities.resize(length); int cnt = 0; - for (int i = 0; i < order_sq; i++) { + for (std::size_t i = 0; i < order_sq; i++) { if (ff[2 * i + 1] != 0) { - wavevectors[cnt] = twoPI_L * sqrt(i + 1); + wavevectors[cnt] = twoPI_L * sqrt(static_cast(i + 1)); intensities[cnt] = ff[2 * i]; cnt++; } diff --git a/src/core/statistics.hpp b/src/core/statistics.hpp index 5b7f95e2ffa..598542bee4b 100644 --- a/src/core/statistics.hpp +++ b/src/core/statistics.hpp @@ -18,8 +18,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _STATISTICS_H -#define _STATISTICS_H +#ifndef CORE_STATISTICS_HPP +#define CORE_STATISTICS_HPP /** \file * Statistical tools to analyze simulations. * diff --git a/src/core/statistics_chain.cpp b/src/core/statistics_chain.cpp index 1a174634b19..3aca1593094 100644 --- a/src/core/statistics_chain.cpp +++ b/src/core/statistics_chain.cpp @@ -19,8 +19,8 @@ * along with this program. If not, see . */ /** \file - Implementation of \ref statistics_chain.hpp "statistics_chain.hpp". -*/ + * Implementation of \ref statistics_chain.hpp "statistics_chain.hpp". 
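 * (Descriptive summary added by the editor, derived from the functions below: calc_re()
 * averages the chain end-to-end distance, calc_rg() the radius of gyration, and calc_rh()
 * a per-chain hydrodynamic radius estimated as r_H = [N(N-1)/2] / sum_{i<j} 1/|r_i - r_j|.)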
+ */ #include "statistics_chain.hpp" #include "Particle.hpp" @@ -28,6 +28,7 @@ #include "particle_data.hpp" #include +#include #include #include @@ -43,8 +44,9 @@ std::array calc_re(int chain_start, int chain_n_chains, get_particle_data(chain_start + i * chain_length + chain_length - 1); auto const &p2 = get_particle_data(chain_start + i * chain_length); - auto const d = unfolded_position(p1.r.p, p1.l.i, box_geo.length()) - - unfolded_position(p2.r.p, p2.l.i, box_geo.length()); + auto const d = + unfolded_position(p1.pos(), p1.image_box(), box_geo.length()) - + unfolded_position(p2.pos(), p2.image_box(), box_geo.length()); auto const norm2 = d.norm2(); dist += sqrt(norm2); dist2 += norm2; @@ -61,7 +63,6 @@ std::array calc_re(int chain_start, int chain_n_chains, std::array calc_rg(int chain_start, int chain_n_chains, int chain_length) { double r_G = 0.0, r_G2 = 0.0, r_G4 = 0.0; - double tmp; std::array rg; for (int i = 0; i < chain_n_chains; i++) { @@ -70,59 +71,62 @@ std::array calc_rg(int chain_start, int chain_n_chains, for (int j = 0; j < chain_length; j++) { auto const &p = get_particle_data(chain_start + i * chain_length + j); - if (p.p.is_virtual) { + if (p.is_virtual()) { throw std::runtime_error( "Gyration tensor is not well-defined for chains including virtual " "sites. Virtual sites do not have a meaningful mass."); } - r_CM += unfolded_position(p.r.p, p.l.i, box_geo.length()) * p.p.mass; - M += p.p.mass; + r_CM += unfolded_position(p.pos(), p.image_box(), box_geo.length()) * + p.mass(); + M += p.mass(); } r_CM /= M; - tmp = 0.0; + double tmp = 0.0; for (int j = 0; j < chain_length; ++j) { auto const &p = get_particle_data(chain_start + i * chain_length + j); Utils::Vector3d const d = - unfolded_position(p.r.p, p.l.i, box_geo.length()) - r_CM; + unfolded_position(p.pos(), p.image_box(), box_geo.length()) - r_CM; tmp += d.norm2(); } - tmp /= (double)chain_length; + tmp /= static_cast(chain_length); r_G += sqrt(tmp); r_G2 += tmp; r_G4 += tmp * tmp; } - tmp = static_cast(chain_n_chains); + auto const tmp = static_cast(chain_n_chains); rg[0] = r_G / tmp; rg[2] = r_G2 / tmp; - rg[1] = sqrt(rg[2] - rg[0] * rg[0]); - rg[3] = sqrt(r_G4 / tmp - rg[2] * rg[2]); + rg[1] = sqrt(rg[2] - Utils::sqr(rg[0])); + rg[3] = sqrt(r_G4 / tmp - Utils::sqr(rg[2])); return rg; } std::array calc_rh(int chain_start, int chain_n_chains, int chain_length) { - double r_H = 0.0, r_H2 = 0.0, ri = 0.0, prefac, tmp; + double r_H = 0.0, r_H2 = 0.0; std::array rh; - prefac = 0.5 * chain_length * (chain_length - 1); + auto const chain_l = static_cast(chain_length); + auto const prefac = 0.5 * chain_l * (chain_l - 1.); for (int p = 0; p < chain_n_chains; p++) { - ri = 0.0; + double ri = 0.0; for (int i = chain_start + chain_length * p; i < chain_start + chain_length * (p + 1); i++) { auto const &p1 = get_particle_data(i); for (int j = i + 1; j < chain_start + chain_length * (p + 1); j++) { auto const &p2 = get_particle_data(j); - auto const d = unfolded_position(p1.r.p, p1.l.i, box_geo.length()) - - unfolded_position(p2.r.p, p2.l.i, box_geo.length()); + auto const d = + unfolded_position(p1.pos(), p1.image_box(), box_geo.length()) - + unfolded_position(p2.pos(), p2.image_box(), box_geo.length()); ri += 1.0 / d.norm(); } } - tmp = prefac / ri; + auto const tmp = prefac / ri; r_H += tmp; r_H2 += tmp * tmp; } - tmp = static_cast(chain_n_chains); + auto const tmp = static_cast(chain_n_chains); rh[0] = r_H / tmp; - rh[1] = sqrt(r_H2 / tmp - rh[0] * rh[0]); + rh[1] = sqrt(r_H2 / tmp - Utils::sqr(rh[0])); return rh; } diff 
--git a/src/core/stokesian_dynamics/sd_interface.cpp b/src/core/stokesian_dynamics/sd_interface.cpp index e41c1045619..9b9c61e9a1b 100644 --- a/src/core/stokesian_dynamics/sd_interface.cpp +++ b/src/core/stokesian_dynamics/sd_interface.cpp @@ -49,7 +49,7 @@ namespace { struct SD_particle_data { SD_particle_data() = default; explicit SD_particle_data(Particle const &p) - : type(p.p.type), pos(p.r.p), ext_force(p.f) {} + : type(p.type()), pos(p.pos()), ext_force(p.f) {} int type = 0; @@ -93,7 +93,7 @@ void sd_update_locally(ParticleRange const &parts) { for (auto &p : parts) { // skip virtual particles - if (p.p.is_virtual) { + if (p.is_virtual()) { continue; } diff --git a/src/core/thermostats/npt_inline.hpp b/src/core/thermostats/npt_inline.hpp index 72afebe7ed3..398147c277e 100644 --- a/src/core/thermostats/npt_inline.hpp +++ b/src/core/thermostats/npt_inline.hpp @@ -49,16 +49,13 @@ friction_therm0_nptiso(IsotropicNptThermostat const &npt_iso, static_assert(step == 1 or step == 2, "NPT only has 2 integration steps"); constexpr auto const salt = (step == 1) ? RNGSalt::NPTISO0_HALF_STEP1 : RNGSalt::NPTISO0_HALF_STEP2; - if (thermo_switch & THERMO_NPT_ISO) { - if (npt_iso.pref_noise_0 > 0.0) { - return npt_iso.pref_rescale_0 * vel + - npt_iso.pref_noise_0 * - Random::noise_uniform(npt_iso.rng_counter(), - npt_iso.rng_seed(), p_identity); - } - return npt_iso.pref_rescale_0 * vel; + if (npt_iso.pref_noise_0 > 0.0) { + return npt_iso.pref_rescale_0 * vel + + npt_iso.pref_noise_0 * + Random::noise_uniform(npt_iso.rng_counter(), + npt_iso.rng_seed(), p_identity); } - return {}; + return npt_iso.pref_rescale_0 * vel; } /** Add p_diff-dependent noise and friction for NpT-sims to \ref @@ -66,16 +63,13 @@ friction_therm0_nptiso(IsotropicNptThermostat const &npt_iso, */ inline double friction_thermV_nptiso(IsotropicNptThermostat const &npt_iso, double p_diff) { - if (thermo_switch & THERMO_NPT_ISO) { - if (npt_iso.pref_noise_V > 0.0) { - return npt_iso.pref_rescale_V * p_diff + - npt_iso.pref_noise_V * - Random::noise_uniform( - npt_iso.rng_counter(), npt_iso.rng_seed(), 0); - } - return npt_iso.pref_rescale_V * p_diff; + if (npt_iso.pref_noise_V > 0.0) { + return npt_iso.pref_rescale_V * p_diff + + npt_iso.pref_noise_V * + Random::noise_uniform( + npt_iso.rng_counter(), npt_iso.rng_seed(), 0); } - return 0.0; + return npt_iso.pref_rescale_V * p_diff; } #endif // NPT diff --git a/src/core/tuning.cpp b/src/core/tuning.cpp index 1a332acd270..e9c4f8a76df 100644 --- a/src/core/tuning.cpp +++ b/src/core/tuning.cpp @@ -97,7 +97,6 @@ void tune_skin(double min_skin, double max_skin, double tol, int int_steps, double a = min_skin; double b = max_skin; - double time_a, time_b; /* The maximal skin is the remainder from the required cutoff to * the maximal range that can be supported by the cell system, but @@ -111,10 +110,10 @@ void tune_skin(double min_skin, double max_skin, double tol, int int_steps, while (fabs(a - b) > tol) { mpi_set_skin(a); - time_a = time_calc(int_steps); + auto const time_a = time_calc(int_steps); mpi_set_skin(b); - time_b = time_calc(int_steps); + auto const time_b = time_calc(int_steps); if (time_a > time_b) { a = 0.5 * (a + b); diff --git a/src/core/unit_tests/BondList_test.cpp b/src/core/unit_tests/BondList_test.cpp index 186e30aede3..765f3ca638a 100644 --- a/src/core/unit_tests/BondList_test.cpp +++ b/src/core/unit_tests/BondList_test.cpp @@ -33,7 +33,7 @@ BOOST_AUTO_TEST_CASE(BondView_) { /* Dummy values */ auto const id = 5; - auto const partners = std::array{12, 13, 14}; + auto 
const partners = std::array{{12, 13, 14}}; /* BondView can be constructed from an id and a partner range */ auto const view = BondView{id, partners}; @@ -45,7 +45,7 @@ BOOST_AUTO_TEST_CASE(BondView_) { /* Comparison ops */ { auto const partners_same = partners; - auto const partners_different = std::array{15, 16}; + auto const partners_different = std::array{{15, 16}}; BOOST_CHECK((BondView{id, partners} == BondView{id, partners_same})); BOOST_CHECK(not(BondView{id, partners} != BondView{id, partners_same})); @@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(Iterator_incement_) { BOOST_AUTO_TEST_CASE(insert_) { /* Dummy values */ - auto const partners = std::array{1, 2, 3}; + auto const partners = std::array{{1, 2, 3}}; auto const bond1 = BondView{1, partners}; auto const bond2 = BondView{2, partners}; @@ -114,7 +114,7 @@ BOOST_AUTO_TEST_CASE(insert_) { } BOOST_AUTO_TEST_CASE(erase_) { - auto const partners = std::array{1, 2, 3}; + auto const partners = std::array{{1, 2, 3}}; auto const bond1 = BondView{1, partners}; auto const bond2 = BondView{2, partners}; auto const bond3 = BondView{3, partners}; @@ -166,7 +166,7 @@ BOOST_AUTO_TEST_CASE(erase_) { } BOOST_AUTO_TEST_CASE(clear_) { - auto const partners = std::array{1, 2, 3}; + auto const partners = std::array{{1, 2, 3}}; auto const bond1 = BondView{1, partners}; auto const bond2 = BondView{2, partners}; @@ -181,7 +181,7 @@ BOOST_AUTO_TEST_CASE(clear_) { } BOOST_AUTO_TEST_CASE(serialization_) { - auto const partners = std::array{4, 5, 6}; + auto const partners = std::array{{4, 5, 6}}; auto const bond1 = BondView{1, partners}; auto const bond2 = BondView{2, partners}; diff --git a/src/core/unit_tests/CMakeLists.txt b/src/core/unit_tests/CMakeLists.txt index b3771dd21e6..185fd3c0bb8 100644 --- a/src/core/unit_tests/CMakeLists.txt +++ b/src/core/unit_tests/CMakeLists.txt @@ -51,6 +51,8 @@ unit_test(NAME Lattice_test SRC Lattice_test.cpp DEPENDS EspressoCore) unit_test(NAME lb_exceptions SRC lb_exceptions.cpp DEPENDS EspressoCore) unit_test(NAME Verlet_list_test SRC Verlet_list_test.cpp DEPENDS EspressoCore NUM_PROC 4) +unit_test(NAME VerletCriterion_test SRC VerletCriterion_test.cpp DEPENDS + EspressoCore) unit_test(NAME thermostats_test SRC thermostats_test.cpp DEPENDS EspressoCore) unit_test(NAME random_test SRC random_test.cpp DEPENDS EspressoUtils Random123) unit_test(NAME BondList_test SRC BondList_test.cpp DEPENDS EspressoCore) diff --git a/src/core/unit_tests/EspressoSystemStandAlone_test.cpp b/src/core/unit_tests/EspressoSystemStandAlone_test.cpp index 47c5c876336..0498412943f 100644 --- a/src/core/unit_tests/EspressoSystemStandAlone_test.cpp +++ b/src/core/unit_tests/EspressoSystemStandAlone_test.cpp @@ -155,8 +155,8 @@ BOOST_FIXTURE_TEST_CASE(espresso_system_stand_alone, ParticleFactory, auto const acc_value = time_series.back(); auto const obs_value = (*obs)(); - BOOST_TEST(obs_value == p.m.v, boost::test_tools::per_element()); - BOOST_TEST(acc_value == p.m.v, boost::test_tools::per_element()); + BOOST_TEST(obs_value == p.v(), boost::test_tools::per_element()); + BOOST_TEST(acc_value == p.v(), boost::test_tools::per_element()); } } @@ -166,7 +166,7 @@ BOOST_FIXTURE_TEST_CASE(espresso_system_stand_alone, ParticleFactory, for (int i = 0; i < 5; ++i) { set_particle_v(pid2, {static_cast(i), 0., 0.}); auto const &p = get_particle_data(pid2); - auto const kinetic_energy = 0.5 * p.p.mass * p.m.v.norm2(); + auto const kinetic_energy = 0.5 * p.mass() * p.v().norm2(); auto const obs_energy = calculate_energy(); BOOST_CHECK_CLOSE(obs_energy->kinetic[0], 
kinetic_energy, tol); BOOST_CHECK_CLOSE(observable_compute_energy(), kinetic_energy, tol); @@ -226,9 +226,6 @@ BOOST_FIXTURE_TEST_CASE(espresso_system_stand_alone, ParticleFactory, // measure energies auto const obs_energy = calculate_energy(); - auto const &p1 = get_particle_data(pid1); - auto const &p2 = get_particle_data(pid2); - auto const &p3 = get_particle_data(pid3); auto const none_energy = 0.0; auto const harm_energy = 0.5 * harm_bond.k * Utils::sqr(harm_bond.r - dist); auto const fene_energy = @@ -256,7 +253,7 @@ BOOST_FIXTURE_TEST_CASE(espresso_system_stand_alone, ParticleFactory, // measure energies auto const step = 0.02; - auto const pos1 = get_particle_data(pid1).r.p; + auto const pos1 = get_particle_data(pid1).pos(); Utils::Vector3d pos2{box_center, box_center - 0.1, 1.0}; for (int i = 0; i < 10; ++i) { // move particle @@ -295,19 +292,19 @@ BOOST_FIXTURE_TEST_CASE(espresso_system_stand_alone, ParticleFactory, auto const &p2 = get_particle_data(pid2); auto const &p3 = get_particle_data(pid3); // forces are symmetric - BOOST_CHECK_CLOSE(p1.f.f[0], -p2.f.f[0], tol); - BOOST_CHECK_CLOSE(p3.f.f[1], -p2.f.f[1], tol); + BOOST_CHECK_CLOSE(p1.force()[0], -p2.force()[0], tol); + BOOST_CHECK_CLOSE(p3.force()[1], -p2.force()[1], tol); // periodic image contributions to the electrostatic force are negligible - BOOST_CHECK_LE(std::abs(p1.f.f[1]), tol); - BOOST_CHECK_LE(std::abs(p1.f.f[2]), tol); - BOOST_CHECK_LE(std::abs(p2.f.f[2]), tol); + BOOST_CHECK_LE(std::abs(p1.force()[1]), tol); + BOOST_CHECK_LE(std::abs(p1.force()[2]), tol); + BOOST_CHECK_LE(std::abs(p2.force()[2]), tol); // zero long-range contribution for uncharged particles - BOOST_CHECK_EQUAL(p3.f.f[0], 0.); - BOOST_CHECK_EQUAL(p3.f.f[2], 0.); + BOOST_CHECK_EQUAL(p3.force()[0], 0.); + BOOST_CHECK_EQUAL(p3.force()[2], 0.); // velocities are not propagated - BOOST_CHECK_EQUAL(p1.m.v.norm(), 0.); - BOOST_CHECK_EQUAL(p2.m.v.norm(), 0.); - BOOST_CHECK_EQUAL(p3.m.v.norm(), 0.); + BOOST_CHECK_EQUAL(p1.v().norm(), 0.); + BOOST_CHECK_EQUAL(p2.v().norm(), 0.); + BOOST_CHECK_EQUAL(p3.v().norm(), 0.); // check integrated trajectory; the time step is chosen // small enough so that particles don't travel too far @@ -319,15 +316,15 @@ BOOST_FIXTURE_TEST_CASE(espresso_system_stand_alone, ParticleFactory, std::unordered_map expected; for (auto pid : pids) { auto p = get_particle_data(pid); - p.m.v += 0.5 * time_step * p.f.f / p.p.mass; - p.r.p += time_step * p.m.v; - expected[pid] = p.r.p; + p.v() += 0.5 * time_step * p.force() / p.p.mass; + p.pos() += time_step * p.v(); + expected[pid] = p.pos(); } mpi_integrate(1, 0); for (auto pid : pids) { auto const &p = get_particle_data(pid); - BOOST_CHECK_LE((p.r.p - expected[pid]).norm(), tol); - assert((p.r.p - pos_com).norm() < 0.5); + BOOST_CHECK_LE((p.pos() - expected[pid]).norm(), tol); + assert((p.pos() - pos_com).norm() < 0.5); } } } diff --git a/src/core/unit_tests/LocalBox_test.cpp b/src/core/unit_tests/LocalBox_test.cpp index 1d951894f98..c0aa5ca1e3d 100644 --- a/src/core/unit_tests/LocalBox_test.cpp +++ b/src/core/unit_tests/LocalBox_test.cpp @@ -21,6 +21,7 @@ #define BOOST_TEST_DYN_LINK #include +#include "CellStructureType.hpp" #include "LocalBox.hpp" #include @@ -52,14 +53,16 @@ BOOST_AUTO_TEST_CASE(constructors) { { Utils::Vector const lower_corner = {1., 2., 3.}; Utils::Vector const local_box_length = {4., 5., 6.}; - Utils::Array const boundaries = {-1, 0, 1, 1, 0, -1}; + Utils::Array const boundaries = {{{-1, 0, 1, 1, 0, -1}}}; + CellStructureType const type = 
CellStructureType::CELL_STRUCTURE_REGULAR; auto const box = - LocalBox(lower_corner, local_box_length, boundaries); + LocalBox(lower_corner, local_box_length, boundaries, type); BOOST_CHECK(box.my_left() == lower_corner); BOOST_CHECK(box.length() == local_box_length); BOOST_CHECK(boost::equal(boundaries, box.boundary())); + BOOST_CHECK(box.cell_structure_type() == type); check_length(box); } } diff --git a/src/core/unit_tests/ParticleFactory.hpp b/src/core/unit_tests/ParticleFactory.hpp index d59e18b93a2..f1a66c00abb 100644 --- a/src/core/unit_tests/ParticleFactory.hpp +++ b/src/core/unit_tests/ParticleFactory.hpp @@ -35,14 +35,7 @@ struct ParticleFactory { } } - void create_particle(Utils::Vector3d const &pos, int pid = -1, - int type = -1) { - if (pid < 0) { - pid = get_maximal_particle_id() + 1; - } - if (type < 0) { - type = 0; - } + void create_particle(Utils::Vector3d const &pos, int pid, int type) { place_particle(pid, pos); set_particle_type(pid, type); particle_cache.emplace_back(pid); diff --git a/src/core/unit_tests/ParticleIterator_test.cpp b/src/core/unit_tests/ParticleIterator_test.cpp index 892c44a204a..5953546756a 100644 --- a/src/core/unit_tests/ParticleIterator_test.cpp +++ b/src/core/unit_tests/ParticleIterator_test.cpp @@ -38,11 +38,8 @@ using Cell = Testing::Cell; std::vector> make_cells(std::size_t n) { std::vector> cells(n); - - for (auto &c : cells) { - c = std::make_unique(); - } - + std::generate(cells.begin(), cells.end(), + []() { return std::make_unique(); }); return cells; } diff --git a/src/core/unit_tests/Particle_test.cpp b/src/core/unit_tests/Particle_test.cpp index 63cc49aed37..af08ba21579 100644 --- a/src/core/unit_tests/Particle_test.cpp +++ b/src/core/unit_tests/Particle_test.cpp @@ -63,11 +63,11 @@ BOOST_AUTO_TEST_CASE(serialization) { auto p = Particle(); auto const bond_id = 5; - auto const bond_partners = std::array{12, 13, 14}; + auto const bond_partners = std::array{{12, 13, 14}}; std::vector el = {5, 6, 7, 8}; - p.p.identity = 15; + p.id() = 15; p.bonds().insert({bond_id, bond_partners}); #ifdef EXCLUSIONS p.exclusions() = el; @@ -81,7 +81,7 @@ BOOST_AUTO_TEST_CASE(serialization) { auto q = Particle(); in_ar >> q; - BOOST_CHECK(q.p.identity == p.p.identity); + BOOST_CHECK(q.id() == p.id()); BOOST_CHECK((*q.bonds().begin() == BondView{bond_id, bond_partners})); #ifdef EXCLUSIONS @@ -241,4 +241,38 @@ BOOST_AUTO_TEST_CASE(rattle_constructors) { check_particle_rattle(out, pr); } } -#endif +#endif // BOND_CONSTRAINT + +#ifdef EXTERNAL_FORCES +#ifdef ROTATION +BOOST_AUTO_TEST_CASE(particle_bitfields) { + auto p = Particle(); + + // check default values + BOOST_CHECK(not p.has_fixed_coordinates()); + BOOST_CHECK(not p.can_rotate()); + BOOST_CHECK(not p.is_fixed_along(1)); + BOOST_CHECK(not p.can_rotate_around(1)); + + // check setting of one axis + p.set_fixed_along(1, true); + p.set_can_rotate_around(1, true); + BOOST_CHECK(p.is_fixed_along(1)); + BOOST_CHECK(p.can_rotate_around(1)); + BOOST_CHECK(p.has_fixed_coordinates()); + BOOST_CHECK(p.can_rotate()); + + // check that unsetting is properly registered + p.set_fixed_along(1, false); + p.set_can_rotate_around(1, false); + BOOST_CHECK(not p.has_fixed_coordinates()); + BOOST_CHECK(not p.can_rotate()); + + // check setting of all flags at once + p.set_can_rotate_all_axes(); + BOOST_CHECK(p.can_rotate_around(0)); + BOOST_CHECK(p.can_rotate_around(1)); + BOOST_CHECK(p.can_rotate_around(2)); +} +#endif // ROTATION +#endif // EXTERNAL_FORCES \ No newline at end of file diff --git 
a/src/core/unit_tests/VerletCriterion_test.cpp b/src/core/unit_tests/VerletCriterion_test.cpp new file mode 100644 index 00000000000..9de2de54834 --- /dev/null +++ b/src/core/unit_tests/VerletCriterion_test.cpp @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2022 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#define BOOST_TEST_MODULE Verlet criterion checks +#define BOOST_TEST_DYN_LINK +#include + +#include "CellStructure.hpp" +#include "Particle.hpp" +#include "config.hpp" +#include "nonbonded_interactions/VerletCriterion.hpp" +#include "particle_data.hpp" + +BOOST_AUTO_TEST_CASE(VerletCriterion_test) { + auto constexpr skin = 0.4; + auto constexpr max_cut = 2.5; + auto constexpr coulomb_cut = 2.0; + auto constexpr dipolar_cut = 1.8; + auto constexpr collision_cut = 1.6; + + struct GetMaxCutoff { + double operator()(int, int) const { return skin + max_cut; } + }; + struct GetZeroCutoff { + double operator()(int, int) const { return -skin; } + }; + + VerletCriterion criterion(skin, max_cut); + VerletCriterion criterion_inactive(skin, INACTIVE_CUTOFF); + VerletCriterion criterion_long_range( + skin, max_cut, coulomb_cut, dipolar_cut, collision_cut); + + Particle p1, p2; + p1.id() = 1; + p2.id() = 2; + + { + auto constexpr cutoff = skin + max_cut; + auto const below = Distance{Utils::Vector3d{cutoff - 0.1, 0.0, 0.0}}; + auto const above = Distance{Utils::Vector3d{cutoff + 0.1, 0.0, 0.0}}; + BOOST_CHECK(criterion(p1, p2, below)); + BOOST_CHECK(!criterion_inactive(p1, p2, below)); + BOOST_CHECK(!criterion(p1, p2, above)); + BOOST_CHECK(!criterion_inactive(p1, p2, above)); + } + +#ifdef ELECTROSTATICS + { + auto constexpr cutoff = skin + coulomb_cut; + auto const below = Distance{Utils::Vector3d{cutoff - 0.1, 0.0, 0.0}}; + auto const above = Distance{Utils::Vector3d{cutoff + 0.1, 0.0, 0.0}}; + BOOST_CHECK(!criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + p2.q() = 1.; + BOOST_CHECK(!criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + p1.q() = 1.; + BOOST_CHECK(criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + p1.q() = 0.; + p2.q() = 0.; + } +#endif // ELECTROSTATICS + +#ifdef DIPOLES + { + auto constexpr cutoff = skin + dipolar_cut; + auto const below = Distance{Utils::Vector3d{cutoff - 0.1, 0.0, 0.0}}; + auto const above = Distance{Utils::Vector3d{cutoff + 0.1, 0.0, 0.0}}; + BOOST_CHECK(!criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + p2.dipm() = 1.; + BOOST_CHECK(!criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + p1.dipm() = 1.; + BOOST_CHECK(criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + p1.dipm() = 0.; + p2.dipm() = 0.; + } +#endif // DIPOLES + +#ifdef COLLISION_DETECTION + { + auto constexpr cutoff = skin + 
collision_cut; + auto const below = Distance{Utils::Vector3d{cutoff - 0.1, 0.0, 0.0}}; + auto const above = Distance{Utils::Vector3d{cutoff + 0.1, 0.0, 0.0}}; + BOOST_CHECK(criterion_long_range(p1, p2, below)); + BOOST_CHECK(!criterion_long_range(p1, p2, above)); + } +#endif // COLLISION_DETECTION +} diff --git a/src/core/unit_tests/Verlet_list_test.cpp b/src/core/unit_tests/Verlet_list_test.cpp index c5b9e202a93..de340f6f593 100644 --- a/src/core/unit_tests/Verlet_list_test.cpp +++ b/src/core/unit_tests/Verlet_list_test.cpp @@ -124,11 +124,11 @@ struct : public IntegratorHelper { } // namespace Testing inline double get_dist_from_last_verlet_update(Particle const &p) { - return (p.r.p - p.l.p_old).norm(); + return (p.pos() - p.pos_at_last_verlet_update()).norm(); } inline double get_dist_from_pair(Particle const &p1, Particle const &p2) { - return (p1.r.p - p2.r.p).norm(); + return (p1.pos() - p2.pos()).norm(); } auto const node_grids = std::vector{{4, 1, 1}, {2, 2, 1}}; @@ -148,7 +148,6 @@ BOOST_DATA_TEST_CASE_F(ParticleFactory, verlet_list_update, boost::mpi::communicator world; auto const box_l = 8.; - auto const box_center = box_l / 2.; espresso::system->set_box_l(Utils::Vector3d::broadcast(box_l)); espresso::system->set_node_grid(node_grid); @@ -207,10 +206,10 @@ BOOST_DATA_TEST_CASE_F(ParticleFactory, verlet_list_update, mpi_integrate(1, 0); auto const &p1 = get_particle_data(pid1); auto const &p2 = get_particle_data(pid2); - BOOST_CHECK_CLOSE(p1.f.f[0] - p1.p.ext_force[0], 480., 1e-9); - BOOST_CHECK_CLOSE(p1.f.f[1], 0., tol); - BOOST_CHECK_CLOSE(p1.f.f[2], 0., tol); - BOOST_TEST(p1.f.f - p1.p.ext_force == -p2.f.f, + BOOST_CHECK_CLOSE(p1.force()[0] - p1.ext_force()[0], 480., 1e-9); + BOOST_CHECK_CLOSE(p1.force()[1], 0., tol); + BOOST_CHECK_CLOSE(p1.force()[2], 0., tol); + BOOST_TEST(p1.force() - p1.ext_force() == -p2.force(), boost::test_tools::per_element()); BOOST_CHECK_LT(get_dist_from_last_verlet_update(p1), skin / 2.); } diff --git a/src/core/unit_tests/energy_test.cpp b/src/core/unit_tests/energy_test.cpp index 8cd7cc7710c..81d34c20357 100644 --- a/src/core/unit_tests/energy_test.cpp +++ b/src/core/unit_tests/energy_test.cpp @@ -21,47 +21,53 @@ #define BOOST_TEST_DYN_LINK #include +#include "Particle.hpp" #include "energy_inline.hpp" -BOOST_AUTO_TEST_CASE(translational_kinetic_energy_) { +#include "utils/Vector.hpp" - Particle p; +BOOST_AUTO_TEST_CASE(translational_kinetic_energy_) { + // real particle + { + Particle p; #ifdef MASS - p.p.mass = 2.; + p.mass() = 2.; #endif - p.m.v = {3., 4., 5.}; + p.v() = {3., 4., 5.}; - auto const expected = 0.5 * p.p.mass * p.m.v.norm2(); - BOOST_TEST(translational_kinetic_energy(p) == expected); + auto const expected = 0.5 * p.mass() * p.v().norm2(); + BOOST_CHECK_EQUAL(translational_kinetic_energy(p), expected); + } -/* virtual */ -#ifdef VIRTUAL_SITES + // virtual particle { +#ifdef VIRTUAL_SITES + Particle p; #ifdef MASS - p.p.mass = 2.; + p.mass() = 2.; #endif - p.p.is_virtual = true; - p.m.v = {3., 4., 5.}; + p.set_virtual(true); + p.v() = {3., 4., 5.}; auto const expected = 0.; - BOOST_TEST(translational_kinetic_energy(p) == expected); - } + BOOST_CHECK_EQUAL(translational_kinetic_energy(p), expected); #endif + } } BOOST_AUTO_TEST_CASE(rotational_kinetic_energy_) { - BOOST_TEST(rotational_kinetic_energy(Particle{}) == 0.); + BOOST_CHECK_EQUAL(rotational_kinetic_energy(Particle{}), 0.); #ifdef ROTATION { Particle p; - p.m.omega = {1., 2., 3.}; - p.p.rotation = 1; + p.omega() = {1., 2., 3.}; + p.set_can_rotate_all_axes(); auto const 
expected = - 0.5 * (hadamard_product(p.m.omega, p.m.omega) * p.p.rinertia); - BOOST_TEST(rotational_kinetic_energy(p) == expected); + 0.5 * (hadamard_product(p.omega(), p.omega()) * p.rinertia()); + BOOST_CHECK_EQUAL(rotational_kinetic_energy(p), expected); } #endif } @@ -69,16 +75,16 @@ BOOST_AUTO_TEST_CASE(rotational_kinetic_energy_) { BOOST_AUTO_TEST_CASE(kinetic_energy_) { Particle p; #ifdef MASS - p.p.mass = 2.; + p.mass() = 2.; #endif - p.m.v = {3., 4., 5.}; + p.v() = {3., 4., 5.}; #ifdef ROTATION - p.m.omega = {1., 2., 3.}; - p.p.rotation = 1; + p.omega() = {1., 2., 3.}; + p.set_can_rotate_all_axes(); #endif auto const expected = translational_kinetic_energy(p) + rotational_kinetic_energy(p); - BOOST_TEST(calc_kinetic_energy(p) == expected); -} \ No newline at end of file + BOOST_CHECK_EQUAL(calc_kinetic_energy(p), expected); +} diff --git a/src/core/unit_tests/field_coupling_force_field_test.cpp b/src/core/unit_tests/field_coupling_force_field_test.cpp index 145b673eae3..c35d12efcf6 100644 --- a/src/core/unit_tests/field_coupling_force_field_test.cpp +++ b/src/core/unit_tests/field_coupling_force_field_test.cpp @@ -25,7 +25,6 @@ #include "field_coupling/detail/Base.hpp" #include "field_coupling/detail/BindCoupling.hpp" -using namespace FieldCoupling; #include @@ -65,10 +64,13 @@ BOOST_AUTO_TEST_CASE(BindCoupling_test) { { auto const id = Id{}; auto const bc = make_bind_coupling(id, Particle{}); - const int x = 5; + auto const x = 5; - BOOST_CHECK(5 == bc(5)); - BOOST_CHECK(id.count == 1); + BOOST_CHECK_EQUAL(id.count, 0); + BOOST_CHECK_EQUAL(bc(5), 5); + BOOST_CHECK_EQUAL(id.count, 1); + BOOST_CHECK_EQUAL(bc(x), x); + BOOST_CHECK_EQUAL(id.count, 2); } } @@ -93,13 +95,13 @@ BOOST_AUTO_TEST_CASE(FieldBase_test) { /* ctor copy */ { - int c = 1; - int f = 2; + auto const c = 1; + auto const f = 2; auto base = Base(c, f); - BOOST_CHECK(1 == base.coupling()); - BOOST_CHECK(2 == base.field()); + BOOST_CHECK_EQUAL(base.coupling(), c); + BOOST_CHECK_EQUAL(base.field(), f); } } @@ -110,13 +112,14 @@ struct DummyVectorField { }; BOOST_AUTO_TEST_CASE(ForceField_test) { + using FieldCoupling::ForceField; auto ff = ForceField, DummyVectorField>(Id{}, DummyVectorField{}); - const Utils::Vector3d x{1., 2., 3.}; - const int p = 5; + auto const x = Utils::Vector3d{1., 2., 3.}; - BOOST_CHECK((9. * x) == ff.force(5, x, 9.)); - BOOST_CHECK(1 == ff.coupling().count); + BOOST_CHECK_EQUAL(ff.coupling().count, 0); + BOOST_CHECK_EQUAL(ff.force(5, x, 9.), 9. * x); + BOOST_CHECK_EQUAL(ff.coupling().count, 1); } struct DummyScalarField { @@ -129,13 +132,15 @@ struct DummyScalarField { }; BOOST_AUTO_TEST_CASE(PotentialField_test) { + using FieldCoupling::PotentialField; auto pf = PotentialField, DummyScalarField>(Id{}, DummyScalarField{}); - const Utils::Vector3d x{1., 2., 3.}; + auto const x = Utils::Vector3d{1., 2., 3.}; + BOOST_CHECK_EQUAL(pf.coupling().count, 0); - BOOST_CHECK((2. * x.norm()) == pf.energy(5, x, 2.)); - BOOST_CHECK(1 == pf.coupling().count); + BOOST_CHECK_EQUAL(pf.energy(5, x, 2.), 2. * x.norm()); + BOOST_CHECK_EQUAL(pf.coupling().count, 1); - BOOST_CHECK(-(3. * x) == pf.force(5, x, 0)); - BOOST_CHECK(2 == pf.coupling().count); + BOOST_CHECK_EQUAL(pf.force(5, x, 0), -3. 
* x); + BOOST_CHECK_EQUAL(pf.coupling().count, 2); } diff --git a/src/core/unit_tests/lb_exceptions.cpp b/src/core/unit_tests/lb_exceptions.cpp index 8f43f20d67b..5844fe1e2d0 100644 --- a/src/core/unit_tests/lb_exceptions.cpp +++ b/src/core/unit_tests/lb_exceptions.cpp @@ -21,6 +21,7 @@ #define BOOST_TEST_DYN_LINK #include +#include "grid_based_algorithms/lb.hpp" #include "grid_based_algorithms/lb_interface.hpp" #include "grid_based_algorithms/lb_interpolation.hpp" #include "grid_based_algorithms/lb_particle_coupling.hpp" @@ -84,4 +85,16 @@ BOOST_AUTO_TEST_CASE(exceptions) { BOOST_CHECK_THROW(lb_lbfluid_get_interpolated_density({}), std::exception); ::lattice_switch = ActiveLB::NONE; mpi_set_interpolation_order_local(InterpolationOrder::linear); +#ifdef ADDITIONAL_CHECKS + { + std::stringstream stream_xy{}; + log_buffer_diff(stream_xy, 0, 1, 2, 3, -1); + BOOST_CHECK_EQUAL(stream_xy.str(), + "buffers differ in dir=0 at node index=1 x=2 y=3\n"); + std::stringstream stream_xyz{}; + log_buffer_diff(stream_xyz, 0, 1, 2, 3, 4); + BOOST_CHECK_EQUAL(stream_xyz.str(), + "buffers differ in dir=0 at node index=1 x=2 y=3 z=4\n"); + } +#endif // ADDITIONAL_CHECKS } diff --git a/src/core/unit_tests/link_cell_test.cpp b/src/core/unit_tests/link_cell_test.cpp index d64406eac39..1dd2c2c9589 100644 --- a/src/core/unit_tests/link_cell_test.cpp +++ b/src/core/unit_tests/link_cell_test.cpp @@ -50,7 +50,7 @@ BOOST_AUTO_TEST_CASE(link_cell) { c.particles().resize(n_part_per_cell); for (auto &p : c.particles()) { - p.p.identity = id++; + p.id() = id++; } } @@ -59,8 +59,8 @@ BOOST_AUTO_TEST_CASE(link_cell) { Algorithm::link_cell(cells.begin(), cells.end(), [&lc_pairs](Particle const &p1, Particle const &p2) { - if (p1.p.identity <= p2.p.identity) - lc_pairs.emplace_back(p1.p.identity, p2.p.identity); + if (p1.id() <= p2.id()) + lc_pairs.emplace_back(p1.id(), p2.id()); }); BOOST_CHECK(lc_pairs.size() == (n_part * (n_part - 1) / 2)); diff --git a/src/core/unit_tests/p3m_test.cpp b/src/core/unit_tests/p3m_test.cpp index 37f2afaa9fc..d62e86deaae 100644 --- a/src/core/unit_tests/p3m_test.cpp +++ b/src/core/unit_tests/p3m_test.cpp @@ -29,10 +29,11 @@ BOOST_AUTO_TEST_CASE(calc_meshift_false) { std::array, 3> const ref = { - std::vector{0}, std::vector{0, 1, -2, -1}, - std::vector{0, 1, 2, 3, -3, -2, -1}}; + {std::vector{0}, std::vector{0, 1, -2, -1}, + std::vector{0, 1, 2, 3, -3, -2, -1}}}; - auto const val = detail::calc_meshift({1, 4, 7}, false); + int const mesh[3] = {1, 4, 7}; + auto const val = detail::calc_meshift(mesh, false); for (std::size_t i = 0; i < 3; ++i) { for (std::size_t j = 0; j < ref[i].size(); ++j) { @@ -43,10 +44,11 @@ BOOST_AUTO_TEST_CASE(calc_meshift_false) { BOOST_AUTO_TEST_CASE(calc_meshift_true) { std::array, 3> const ref = { - std::vector{0}, std::vector{0, 1, 0, -1}, - std::vector{0, 1, 2, 0, -3, -2, -1}}; + {std::vector{0}, std::vector{0, 1, 0, -1}, + std::vector{0, 1, 2, 0, -3, -2, -1}}}; - auto const val = detail::calc_meshift({1, 4, 7}, true); + int const mesh[3] = {1, 4, 7}; + auto const val = detail::calc_meshift(mesh, true); for (std::size_t i = 0; i < 3; ++i) { for (std::size_t j = 0; j < ref[i].size(); ++j) { diff --git a/src/core/unit_tests/random_test.cpp b/src/core/unit_tests/random_test.cpp index eef455f911f..a0dac3e720a 100644 --- a/src/core/unit_tests/random_test.cpp +++ b/src/core/unit_tests/random_test.cpp @@ -35,7 +35,7 @@ BOOST_AUTO_TEST_CASE(test_noise_statistics) { constexpr std::size_t const sample_size = 60'000; - constexpr std::size_t const x = 0, y = 1, z = 2; + 
constexpr std::size_t const x = 0, y = 1; constexpr double const tol = 1e-12; double value = 1; @@ -45,7 +45,7 @@ BOOST_AUTO_TEST_CASE(test_noise_statistics) { std::tie(means, variances, covariance, correlation) = noise_statistics( [&value]() -> std::array { value *= -1; - return {Utils::Vector2d{value, -value}}; + return {{Utils::Vector2d{value, -value}}}; }, sample_size); // check pooled mean and variance @@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(test_noise_uniform_1d) { std::vector> correlation; std::tie(means, variances, covariance, correlation) = noise_statistics( [counter = 0]() mutable -> std::array { - return {Random::noise_uniform(counter++, 0, 1)}; + return {{Random::noise_uniform(counter++, 0, 1)}}; }, sample_size); // check pooled mean and variance @@ -88,7 +88,7 @@ BOOST_AUTO_TEST_CASE(test_noise_uniform_3d) { std::vector> correlation; std::tie(means, variances, covariance, correlation) = noise_statistics( [counter = 0]() mutable -> std::array { - return {Random::noise_uniform(counter++, 1, 0)}; + return {{Random::noise_uniform(counter++, 1, 0)}}; }, sample_size); // check pooled mean and variance @@ -113,8 +113,8 @@ BOOST_AUTO_TEST_CASE(test_noise_gaussian_4d) { std::vector> correlation; std::tie(means, variances, covariance, correlation) = noise_statistics( [counter = 0]() mutable -> std::array { - return { - Random::noise_gaussian(counter++, 0, 0)}; + return {{Random::noise_gaussian(counter++, 0, + 0)}}; }, sample_size); // check pooled mean and variance @@ -147,9 +147,9 @@ BOOST_AUTO_TEST_CASE(test_uncorrelated_consecutive_ids) { [counter = 0]() mutable -> std::array { counter++; auto prng = Random::noise_uniform; - return {prng(counter, seed, pid, 0), - prng(counter, seed, pid + pid_offset, 0), - prng(counter + pid_offset, seed, pid, 0)}; + return {{prng(counter, seed, pid, 0), + prng(counter, seed, pid + pid_offset, 0), + prng(counter + pid_offset, seed, pid, 0)}}; }, sample_size)); // check correlation @@ -171,9 +171,9 @@ BOOST_AUTO_TEST_CASE(test_uncorrelated_consecutive_seeds) { [counter = 0]() mutable -> std::array { counter++; auto prng = Random::noise_uniform; - return {prng(counter, seed, pid, 0), - prng(counter, seed + seed_offset, pid, 0), - prng(counter + seed_offset, seed, pid, 0)}; + return {{prng(counter, seed, pid, 0), + prng(counter, seed + seed_offset, pid, 0), + prng(counter + seed_offset, seed, pid, 0)}}; }, sample_size)); // check correlation diff --git a/src/core/unit_tests/thermostats_test.cpp b/src/core/unit_tests/thermostats_test.cpp index fb65d0ad0bc..90124f370cc 100644 --- a/src/core/unit_tests/thermostats_test.cpp +++ b/src/core/unit_tests/thermostats_test.cpp @@ -46,10 +46,10 @@ constexpr auto tol = 6 * 100 * std::numeric_limits::epsilon(); Particle particle_factory() { Particle p{}; - p.p.identity = 0; - p.f.f = {1.0, 2.0, 3.0}; + p.id() = 0; + p.force() = {1.0, 2.0, 3.0}; #ifdef ROTATION - p.f.torque = 4.0 * p.f.f; + p.torque() = 4.0 * p.force(); #endif return p; } @@ -72,7 +72,7 @@ BOOST_AUTO_TEST_CASE(test_brownian_dynamics) { constexpr double kT = 3.0; auto const brownian = thermostat_factory(kT); auto const dispersion = - hadamard_division(particle_factory().f.f, brownian.gamma); + hadamard_division(particle_factory().force(), brownian.gamma); /* check translation */ { @@ -120,7 +120,7 @@ BOOST_AUTO_TEST_CASE(test_brownian_dynamics) { #ifdef ROTATION auto const dispersion_rotation = - hadamard_division(particle_factory().f.torque, brownian.gamma_rotation); + hadamard_division(particle_factory().torque(), brownian.gamma_rotation); /* check 
rotation */ { @@ -185,10 +185,10 @@ BOOST_AUTO_TEST_CASE(test_langevin_dynamics) { /* check translation */ { auto p = particle_factory(); - p.m.v = {1.0, 2.0, 3.0}; + p.v() = {1.0, 2.0, 3.0}; auto const noise = Random::noise_uniform(0, 0, 0); auto const pref = sqrt(prefactor_squared * langevin.gamma); - auto const ref = hadamard_product(-langevin.gamma, p.m.v) + + auto const ref = hadamard_product(-langevin.gamma, p.v()) + hadamard_product(pref, noise); auto const out = friction_thermo_langevin(langevin, p, time_step, kT); BOOST_CHECK_CLOSE(out[0], ref[0], tol); @@ -200,10 +200,10 @@ BOOST_AUTO_TEST_CASE(test_langevin_dynamics) { /* check rotation */ { auto p = particle_factory(); - p.m.omega = {1.0, 2.0, 3.0}; + p.omega() = {1.0, 2.0, 3.0}; auto const noise = Random::noise_uniform(0, 0, 0); auto const pref = sqrt(prefactor_squared * langevin.gamma_rotation); - auto const ref = hadamard_product(-langevin.gamma_rotation, p.m.omega) + + auto const ref = hadamard_product(-langevin.gamma_rotation, p.omega()) + hadamard_product(pref, noise); auto const out = friction_thermo_langevin_rotation(langevin, p, time_step, kT); @@ -221,15 +221,15 @@ BOOST_AUTO_TEST_CASE(test_noise_statistics) { auto thermostat = thermostat_factory(kT, time_step); auto p1 = particle_factory(); auto p2 = particle_factory(); - p1.p.identity = 0; - p2.p.identity = 1; + p1.id() = 0; + p2.id() = 1; auto const correlation = std::get<3>(noise_statistics( [&p1, &p2, &thermostat]() -> std::array { thermostat.rng_increment(); - return {friction_thermo_langevin(thermostat, p1, time_step, kT), - -friction_thermo_langevin(thermostat, p1, time_step, kT), - friction_thermo_langevin(thermostat, p2, time_step, kT)}; + return {{friction_thermo_langevin(thermostat, p1, time_step, kT), + -friction_thermo_langevin(thermostat, p1, time_step, kT), + friction_thermo_langevin(thermostat, p2, time_step, kT)}}; }, sample_size)); for (std::size_t i = 0; i < correlation.size(); ++i) { @@ -254,7 +254,7 @@ BOOST_AUTO_TEST_CASE(test_brownian_randomness) { auto thermostat = thermostat_factory(kT); auto p = particle_factory(); #ifdef ROTATION - p.p.rotation = ROTATION_X | ROTATION_Y | ROTATION_Z; + p.set_can_rotate_all_axes(); constexpr std::size_t N = 4; #else constexpr std::size_t N = 2; @@ -263,14 +263,14 @@ BOOST_AUTO_TEST_CASE(test_brownian_randomness) { auto const correlation = std::get<3>(noise_statistics( [&p, &thermostat]() -> std::array { thermostat.rng_increment(); - return { + return {{ bd_random_walk(thermostat, p, time_step, kT), bd_random_walk_vel(thermostat, p), #ifdef ROTATION bd_random_walk_rot(thermostat, p, time_step, kT), bd_random_walk_vel_rot(thermostat, p), #endif - }; + }}; }, sample_size)); for (std::size_t i = 0; i < correlation.size(); ++i) { @@ -295,12 +295,12 @@ BOOST_AUTO_TEST_CASE(test_langevin_randomness) { auto const correlation = std::get<3>(noise_statistics( [&p, &thermostat]() -> std::array { thermostat.rng_increment(); - return { + return {{ friction_thermo_langevin(thermostat, p, time_step, kT), #ifdef ROTATION friction_thermo_langevin_rotation(thermostat, p, time_step, kT), #endif - }; + }}; }, sample_size)); for (std::size_t i = 0; i < correlation.size(); ++i) { @@ -327,11 +327,11 @@ BOOST_AUTO_TEST_CASE(test_npt_iso_randomness) { auto const correlation = std::get<3>(noise_statistics( [&p, &thermostat]() -> std::array { thermostat.rng_increment(); - return { - friction_therm0_nptiso<1>(thermostat, p.m.v, 0), - friction_therm0_nptiso<2>(thermostat, p.m.v, 0), + return {{ + friction_therm0_nptiso<1>(thermostat, 
p.v(), 0), + friction_therm0_nptiso<2>(thermostat, p.v(), 0), friction_thermV_nptiso(thermostat, 1.5), - }; + }}; }, sample_size)); for (std::size_t i = 0; i < correlation.size(); ++i) { @@ -341,3 +341,15 @@ BOOST_AUTO_TEST_CASE(test_npt_iso_randomness) { } } #endif // NPT + +BOOST_AUTO_TEST_CASE(test_predicate) { + std::vector> const correlation = {{1., 0.}, {0., 1.}}; + auto const is_true = correlation_almost_equal(correlation, 0, 0, 1., 1e-10); + auto const is_false = correlation_almost_equal(correlation, 0, 1, 1., 1e-10); + BOOST_REQUIRE(is_true); + BOOST_REQUIRE(!is_false); + BOOST_CHECK_EQUAL(is_true.message(), ""); + BOOST_CHECK_EQUAL(is_false.message(), + "The correlation coefficient M[0][1]{0} " + "differs from 1 by 1 (> 1e-10)"); +} diff --git a/src/core/virtual_sites.cpp b/src/core/virtual_sites.cpp index 63f56725d1d..66325a2c19c 100644 --- a/src/core/virtual_sites.cpp +++ b/src/core/virtual_sites.cpp @@ -58,7 +58,7 @@ inline std::tuple, double> calculate_vs_relate_to_params(Particle const &p_current, Particle const &p_relate_to) { // get the distance between the particles - Utils::Vector3d d = box_geo.get_mi_vector(p_current.r.p, p_relate_to.r.p); + Utils::Vector3d d = box_geo.get_mi_vector(p_current.pos(), p_relate_to.pos()); // Check if the distance between virtual and non-virtual particles is larger // than minimum global cutoff. If so, warn user. @@ -99,24 +99,25 @@ calculate_vs_relate_to_params(Particle const &p_current, Utils::convert_director_to_quaternion(d); // Define quaternion as described above - quat = - Utils::Quaternion{Utils::dot(p_relate_to.r.quat, quat_director), - -quat_director[0] * p_relate_to.r.quat[1] + - quat_director[1] * p_relate_to.r.quat[0] + - quat_director[2] * p_relate_to.r.quat[3] - - quat_director[3] * p_relate_to.r.quat[2], - p_relate_to.r.quat[1] * quat_director[3] + - p_relate_to.r.quat[0] * quat_director[2] - - p_relate_to.r.quat[3] * quat_director[1] - - p_relate_to.r.quat[2] * quat_director[0], - quat_director[3] * p_relate_to.r.quat[0] - - p_relate_to.r.quat[3] * quat_director[0] + - p_relate_to.r.quat[2] * quat_director[1] - - p_relate_to.r.quat[1] * quat_director[2]}; - quat /= p_relate_to.r.quat.norm2(); + auto relate_to_quat = p_relate_to.quat(); + quat = Utils::Quaternion{ + {{{Utils::dot(relate_to_quat, quat_director), + -quat_director[0] * relate_to_quat[1] + + quat_director[1] * relate_to_quat[0] + + quat_director[2] * relate_to_quat[3] - + quat_director[3] * relate_to_quat[2], + relate_to_quat[1] * quat_director[3] + + relate_to_quat[0] * quat_director[2] - + relate_to_quat[3] * quat_director[1] - + relate_to_quat[2] * quat_director[0], + quat_director[3] * relate_to_quat[0] - + relate_to_quat[3] * quat_director[0] + + relate_to_quat[2] * quat_director[1] - + relate_to_quat[1] * quat_director[2]}}}}; + quat /= relate_to_quat.norm2(); // Verify result - Utils::Quaternion qtemp = p_relate_to.r.quat * quat; + Utils::Quaternion qtemp = relate_to_quat * quat; for (int i = 0; i < 4; i++) if (fabs(qtemp[i] - quat_director[i]) > 1E-9) fprintf(stderr, "vs_relate_to: component %d: %f instead of %f\n", i, @@ -128,9 +129,9 @@ calculate_vs_relate_to_params(Particle const &p_current, void local_vs_relate_to(Particle &p_current, Particle const &p_relate_to) { // Set the particle id of the particle we want to relate to, the distance // and the relative orientation - p_current.p.vs_relative.to_particle_id = p_relate_to.identity(); - std::tie(p_current.p.vs_relative.rel_orientation, - p_current.p.vs_relative.distance) = + 
p_current.vs_relative().to_particle_id = p_relate_to.identity(); + std::tie(p_current.vs_relative().rel_orientation, + p_current.vs_relative().distance) = calculate_vs_relate_to_params(p_current, p_relate_to); } diff --git a/src/core/virtual_sites/VirtualSites.hpp b/src/core/virtual_sites/VirtualSites.hpp index 56cb97044a5..28a79c9f2de 100644 --- a/src/core/virtual_sites/VirtualSites.hpp +++ b/src/core/virtual_sites/VirtualSites.hpp @@ -32,6 +32,8 @@ * - update virtual sites */ +#include + #ifdef VIRTUAL_SITES #include #include @@ -49,15 +51,15 @@ class VirtualSites { /** Back-transfer forces (and torques) to non-virtual particles. */ virtual void back_transfer_forces_and_torques() const {} /** @brief Called after force calculation (and before rattle/shake) */ - virtual void after_force_calc(){}; - virtual void after_lb_propagation(double){}; + virtual void after_force_calc() {} + virtual void after_lb_propagation(double) {} /** @brief Pressure contribution. */ - virtual Utils::Matrix pressure_tensor() const { return {}; }; + virtual Utils::Matrix pressure_tensor() const { return {}; } /** @brief Enable/disable quaternion calculations for vs.*/ void set_have_quaternion(const bool &have_quaternion) { m_have_quaternion = have_quaternion; - }; - bool get_have_quaternion() const { return m_have_quaternion; }; + } + bool have_quaternions() const { return m_have_quaternion; } private: bool m_have_quaternion = false; diff --git a/src/core/virtual_sites/VirtualSitesInertialessTracers.cpp b/src/core/virtual_sites/VirtualSitesInertialessTracers.cpp index fcd2396c148..a19dfae3e14 100644 --- a/src/core/virtual_sites/VirtualSitesInertialessTracers.cpp +++ b/src/core/virtual_sites/VirtualSitesInertialessTracers.cpp @@ -41,7 +41,7 @@ void VirtualSitesInertialessTracers::after_force_calc() { #endif if (std::any_of(cell_structure.local_particles().begin(), cell_structure.local_particles().end(), - [](Particle &p) { return p.p.is_virtual; })) { + [](Particle &p) { return p.is_virtual(); })) { runtimeErrorMsg() << "Inertialess Tracers: No LB method was active but " "virtual sites present."; return; diff --git a/src/core/virtual_sites/VirtualSitesRelative.cpp b/src/core/virtual_sites/VirtualSitesRelative.cpp index 85f0bc1ca55..b8bd7e766d1 100644 --- a/src/core/virtual_sites/VirtualSitesRelative.cpp +++ b/src/core/virtual_sites/VirtualSitesRelative.cpp @@ -30,7 +30,6 @@ #include #include -#include #include #include @@ -44,9 +43,9 @@ namespace { * @return Orientation quaternion of the virtual site. */ Utils::Quaternion -orientation(Particle const *p_ref, +orientation(const Particle &p_ref, const ParticleProperties::VirtualSitesRelativeParameters &vs_rel) { - return p_ref->r.quat * vs_rel.quat; + return p_ref.quat() * vs_rel.quat; } /** @@ -57,7 +56,7 @@ orientation(Particle const *p_ref, */ inline Utils::Vector3d connection_vector( - Particle const *p_ref, + Particle const &p_ref, const ParticleProperties::VirtualSitesRelativeParameters &vs_rel) { // Calculate the quaternion defining the orientation of the vector connecting // the virtual site and the real particle @@ -65,7 +64,7 @@ inline Utils::Vector3d connection_vector( // of the real particle with the quaternion of the virtual particle, which // specifies the relative orientation. 
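The comment above describes building the virtual site's orientation by composing the reference particle's quaternion with the stored relative orientation; the result is then converted to a director. For reference, a standalone sketch of that quaternion-to-director conversion, assuming a scalar-first `(w, x, y, z)` layout and the body-frame z-axis as director (conventions that `Utils::convert_quaternion_to_director` may or may not share):

```cpp
#include <array>
#include <cassert>
#include <cmath>

// Director (the body-frame z-axis rotated into the lab frame) of a unit
// quaternion q = (w, x, y, z). Convention assumed for illustration only.
std::array<double, 3> director(std::array<double, 4> const &q) {
  auto const w = q[0], x = q[1], y = q[2], z = q[3];
  return {2. * (x * z + w * y), 2. * (y * z - w * x),
          1. - 2. * (x * x + y * y)};
}

int main() {
  // a 90 degree rotation around the y-axis maps e_z onto e_x
  auto const s = std::sqrt(0.5);
  auto const d = director({s, 0., s, 0.});
  assert(std::abs(d[0] - 1.) < 1e-12);
  assert(std::abs(d[1]) < 1e-12 and std::abs(d[2]) < 1e-12);
}
```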
auto const director = Utils::convert_quaternion_to_director( - p_ref->r.quat * vs_rel.rel_orientation) + p_ref.quat() * vs_rel.rel_orientation) .normalize(); return vs_rel.distance * director; @@ -78,9 +77,9 @@ inline Utils::Vector3d connection_vector( * @return Position of the virtual site. */ Utils::Vector3d -position(Particle const *p_ref, +position(Particle const &p_ref, const ParticleProperties::VirtualSitesRelativeParameters &vs_rel) { - return p_ref->r.p + connection_vector(p_ref, vs_rel); + return p_ref.pos() + connection_vector(p_ref, vs_rel); } /** @@ -90,16 +89,16 @@ position(Particle const *p_ref, * @return Velocity of the virtual site. */ Utils::Vector3d -velocity(const Particle *p_ref, +velocity(const Particle &p_ref, const ParticleProperties::VirtualSitesRelativeParameters &vs_rel) { auto const d = connection_vector(p_ref, vs_rel); // Get omega of real particle in space-fixed frame auto const omega_space_frame = - convert_vector_body_to_space(*p_ref, p_ref->m.omega); + convert_vector_body_to_space(p_ref, p_ref.omega()); // Obtain velocity from v=v_real particle + omega_real_particle \times // director - return vector_product(omega_space_frame, d) + p_ref->m.v; + return vector_product(omega_space_frame, d) + p_ref.v(); } /** @@ -118,19 +117,6 @@ Particle *get_reference_particle( return p_ref; } -/** - * @brief Constraint force on the real particle. - * - * Calculates the force exerted by the constraint on the - * reference particle. - */ -ParticleForce constraint_force( - const ParticleForce &f, const Particle *p_ref, - const ParticleProperties::VirtualSitesRelativeParameters &vs_rel) { - return {f.f, - vector_product(connection_vector(p_ref, vs_rel), f.f) + f.torque}; -} - /** * @brief Constraint force to hold the particles at its prescribed position. * @@ -140,7 +126,7 @@ ParticleForce constraint_force( * @return Constraint force. */ auto constraint_stress( - const Utils::Vector3d &f, const Particle *p_ref, + const Utils::Vector3d &f, const Particle &p_ref, const ParticleProperties::VirtualSitesRelativeParameters &vs_rel) { /* The constraint force is minus the force on the particle, make it force * free. The counter force is translated by the connection vector to the @@ -155,21 +141,21 @@ void VirtualSitesRelative::update() const { auto const particles = cell_structure.local_particles(); for (auto &p : particles) { - if (!p.p.is_virtual) + if (!p.is_virtual()) continue; - const Particle *p_ref = get_reference_particle(p.p.vs_relative); + const Particle *p_ref = get_reference_particle(p.vs_relative()); - auto const new_pos = position(p_ref, p.p.vs_relative); + auto const new_pos = position(*p_ref, p.vs_relative()); /* The shift has to respect periodic boundaries: if the reference - * particles is not in the same image box, we potentially avoid to shift + * particles is not in the same image box, we potentially avoid shifting * to the other side of the box. 
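The minimum-image shift mentioned in the comment above (and applied via `box_geo.get_mi_vector` just below) is what keeps the virtual site in the same periodic image as its reference particle. A minimal sketch of that convention for an orthorhombic, fully periodic box, which is simpler than the general `BoxGeometry` machinery:

```cpp
#include <array>
#include <cassert>
#include <cmath>

// Minimum-image vector from b to a in an orthorhombic periodic box.
std::array<double, 3> get_mi_vector(std::array<double, 3> const &a,
                                    std::array<double, 3> const &b,
                                    std::array<double, 3> const &box_l) {
  std::array<double, 3> d{};
  for (int i = 0; i < 3; ++i) {
    d[i] = a[i] - b[i];
    d[i] -= box_l[i] * std::round(d[i] / box_l[i]);
  }
  return d;
}

int main() {
  auto const box = std::array<double, 3>{8., 8., 8.};
  // a site at 7.9 and a reference at 0.1 are only 0.2 apart across the boundary
  auto const d = get_mi_vector({7.9, 0., 0.}, {0.1, 0., 0.}, box);
  assert(std::abs(d[0] - (-0.2)) < 1e-12);
}
```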
*/ - p.r.p += box_geo.get_mi_vector(new_pos, p.r.p); + p.pos() += box_geo.get_mi_vector(new_pos, p.pos()); - p.m.v = velocity(p_ref, p.p.vs_relative); + p.v() = velocity(*p_ref, p.vs_relative()); - if (get_have_quaternion()) - p.r.quat = orientation(p_ref, p.p.vs_relative); + if (have_quaternions()) + p.quat() = p_ref->quat() * p.vs_relative().quat; } if (cell_structure.check_resort_required(particles, skin)) { @@ -187,12 +173,16 @@ void VirtualSitesRelative::back_transfer_forces_and_torques() const { // Iterate over all the particles in the local cells for (auto &p : cell_structure.local_particles()) { // We only care about virtual particles - if (p.p.is_virtual) { + if (p.is_virtual()) { // First obtain the real particle responsible for this virtual particle: - Particle *p_ref = get_reference_particle(p.p.vs_relative); + Particle *p_ref = get_reference_particle(p.vs_relative()); // Add forces and torques - p_ref->f += constraint_force(p.f, p_ref, p.p.vs_relative); + p_ref->force() += p.force(); + p_ref->torque() += + vector_product(connection_vector(*p_ref, p.vs_relative()), + p.force()) + + p.torque(); } } } @@ -202,13 +192,13 @@ Utils::Matrix VirtualSitesRelative::pressure_tensor() const { Utils::Matrix pressure_tensor = {}; for (auto &p : cell_structure.local_particles()) { - if (!p.p.is_virtual) + if (!p.is_virtual()) continue; // First obtain the real particle responsible for this virtual particle: - const Particle *p_ref = get_reference_particle(p.p.vs_relative); + const Particle *p_ref = get_reference_particle(p.vs_relative()); - pressure_tensor += constraint_stress(p.f.f, p_ref, p.p.vs_relative); + pressure_tensor += constraint_stress(p.force(), *p_ref, p.vs_relative()); } return pressure_tensor; diff --git a/src/core/virtual_sites/lb_inertialess_tracers.cpp b/src/core/virtual_sites/lb_inertialess_tracers.cpp index c564ad8f24a..3a171dc9c75 100644 --- a/src/core/virtual_sites/lb_inertialess_tracers.cpp +++ b/src/core/virtual_sites/lb_inertialess_tracers.cpp @@ -34,32 +34,25 @@ #include "integrate.hpp" #include "lb_inertialess_tracers_cuda_interface.hpp" +#include +#include #include #include -void CoupleIBMParticleToFluid(Particle &p); +void CoupleIBMParticleToFluid(Particle const &p); void ParticleVelocitiesFromLB_CPU(); -bool IsHalo(int indexCheck); -void GetIBMInterpolatedVelocity(const Utils::Vector3d &p, Utils::Vector3d &v, - Utils::Vector3d &forceAdded); +bool IsHalo(std::size_t indexCheck); -bool *isHaloCache = nullptr; +static bool *isHaloCache = nullptr; -namespace { -bool in_local_domain(Utils::Vector3d const &pos) { - auto const lblattice = lb_lbfluid_get_lattice(); +inline bool in_local_domain(Utils::Vector3d const &pos) { + auto const offset = Utils::Vector3d::broadcast(0.5 * lblattice.agrid); auto const my_left = local_geo.my_left(); auto const my_right = local_geo.my_right(); - return (pos[0] >= my_left[0] - 0.5 * lblattice.agrid && - pos[0] < my_right[0] + 0.5 * lblattice.agrid && - pos[1] >= my_left[1] - 0.5 * lblattice.agrid && - pos[1] < my_right[1] + 0.5 * lblattice.agrid && - pos[2] >= my_left[2] - 0.5 * lblattice.agrid && - pos[2] < my_right[2] + 0.5 * lblattice.agrid); + return pos >= (my_left - offset) and pos < (my_right + offset); } -} // namespace /** Put the calculated force stored on the ibm particles into the fluid by * updating the @ref lbfields structure. 
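The rewritten `in_local_domain` above leans on the element-wise comparison operators of `Utils::Vector3d`. Spelled out per component, with the half-lattice-constant halo made explicit (hypothetical standalone version for illustration):

```cpp
#include <array>
#include <cassert>

// Is pos inside [my_left - halo, my_right + halo) in every direction?
bool in_local_domain(std::array<double, 3> const &pos,
                     std::array<double, 3> const &my_left,
                     std::array<double, 3> const &my_right, double halo) {
  for (int i = 0; i < 3; ++i) {
    if (pos[i] < my_left[i] - halo or pos[i] >= my_right[i] + halo)
      return false;
  }
  return true;
}

int main() {
  std::array<double, 3> const left{0., 0., 0.}, right{4., 4., 4.};
  auto const halo = 0.5; // 0.5 * agrid in the actual code
  assert(in_local_domain({-0.25, 2., 2.}, left, right, halo));  // in the halo
  assert(!in_local_domain({4.75, 2., 2.}, left, right, halo)); // beyond it
}
```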
@@ -79,8 +72,8 @@ void IBM_ForcesIntoFluid_CPU() { for (auto &p : cell_structure.ghost_particles()) { // for ghost particles we have to check if they lie // in the range of the local lattice nodes - if (in_local_domain(p.r.p)) { - if (p.p.is_virtual) + if (in_local_domain(p.pos())) { + if (p.is_virtual()) CoupleIBMParticleToFluid(p); } } @@ -102,19 +95,13 @@ void IBM_UpdateParticlePositions(ParticleRange const &particles, // Euler integrator for (auto &p : particles) { - if (p.p.is_virtual) { + if (p.is_virtual()) { + for (int axis = 0; axis < 2; axis++) { #ifdef EXTERNAL_FORCES - if (!(p.p.ext_flag & 2)) + if (not p.is_fixed_along(axis)) #endif - p.r.p[0] += p.m.v[0] * time_step; -#ifdef EXTERNAL_FORCES - if (!(p.p.ext_flag & 4)) -#endif - p.r.p[1] += p.m.v[1] * time_step; -#ifdef EXTERNAL_FORCES - if (!(p.p.ext_flag & 8)) -#endif - p.r.p[2] += p.m.v[2] * time_step; + p.pos()[axis] += p.v()[axis] * time_step; + } } } @@ -124,21 +111,21 @@ void IBM_UpdateParticlePositions(ParticleRange const &particles, } /** Put the momentum of a given particle into the LB fluid. */ -void CoupleIBMParticleToFluid(Particle &p) { +void CoupleIBMParticleToFluid(Particle const &p) { // Convert units from MD to LB - auto const delta_j = p.f.f * Utils::sqr(lbpar.tau * lbpar.tau) / lbpar.agrid; + auto const delta_j = p.force() * Utils::int_pow<4>(lbpar.tau) / lbpar.agrid; // Get indices and weights of affected nodes using discrete delta function Utils::Vector node_index{}; Utils::Vector6d delta{}; - lblattice.map_position_to_lattice(p.r.p, node_index, delta); + lblattice.map_position_to_lattice(p.pos(), node_index, delta); // Loop over all affected nodes for (int z = 0; z < 2; z++) { for (int y = 0; y < 2; y++) { for (int x = 0; x < 2; x++) { // Do not put force into a halo node - if (!IsHalo(node_index[(z * 2 + y) * 2 + x])) { + if (!IsHalo(static_cast(node_index[(z * 2 + y) * 2 + x]))) { // Add force into the lbfields structure auto &local_f = lbfields[node_index[(z * 2 + y) * 2 + x]].force_density; @@ -157,17 +144,20 @@ void CoupleIBMParticleToFluid(Particle &p) { * The fluid velocity is obtained by linear interpolation, * cf. eq. (11) in @cite ahlrichs99a. */ -void GetIBMInterpolatedVelocity(const Utils::Vector3d &pos, Utils::Vector3d &v, - Utils::Vector3d &forceAdded) { +template +Utils::Vector3d GetIBMInterpolatedVelocity(Utils::Vector3d const &pos) { + auto const f_ext = + lbpar.ext_force_density * Utils::sqr(lbpar.agrid * lbpar.tau); + /* determine elementary lattice cell surrounding the particle and the relative position of the particle in this cell */ Utils::Vector node_index{}; Utils::Vector6d delta{}; lblattice.map_position_to_lattice(pos, node_index, delta); - Utils::Vector3d interpolated_u = {}; // This for the f/2 contribution to the velocity - forceAdded = {}; + Utils::Vector3d force_added = {}; + Utils::Vector3d interpolated_u = {}; for (int z = 0; z < 2; z++) { for (int y = 0; y < 2; y++) { @@ -184,43 +174,50 @@ void GetIBMInterpolatedVelocity(const Utils::Vector3d &pos, Utils::Vector3d &v, // We probably can even set the boundary velocity directly. 
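Both the coupling and the interpolation loop over the eight lattice nodes surrounding the particle with weights `delta`; incidentally, the new `Utils::int_pow<4>(lbpar.tau)` equals the old `Utils::sqr(lbpar.tau * lbpar.tau)`, i.e. tau to the fourth power. As a reference for the weighting, a standalone trilinear-weight computation (assumed to match what `map_position_to_lattice` produces, up to the exact storage layout of `delta`):

```cpp
#include <array>
#include <cassert>
#include <cmath>

// Trilinear weights of the 8 lattice nodes surrounding pos (lattice units).
// Index order: ((z * 2 + y) * 2 + x), matching the loops in the hunk above.
std::array<double, 8> trilinear_weights(std::array<double, 3> const &pos) {
  std::array<double, 3> frac{};
  for (int i = 0; i < 3; ++i)
    frac[i] = pos[i] - std::floor(pos[i]); // offset within the cell, in [0, 1)
  std::array<double, 8> w{};
  for (int z = 0; z < 2; ++z)
    for (int y = 0; y < 2; ++y)
      for (int x = 0; x < 2; ++x)
        w[(z * 2 + y) * 2 + x] = (x ? frac[0] : 1. - frac[0]) *
                                 (y ? frac[1] : 1. - frac[1]) *
                                 (z ? frac[2] : 1. - frac[2]);
  return w;
}

int main() {
  auto const w = trilinear_weights({1.25, 2.5, 3.75});
  double sum = 0.;
  for (auto wi : w)
    sum += wi; // the weights always add up to 1
  assert(std::abs(sum - 1.) < 1e-12);
}
```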
#ifdef LB_BOUNDARIES if (lbfields[index].boundary) { - local_density = lbpar.density; - local_j = lbpar.density * - (*LBBoundaries::lbboundaries[lbfields[index].boundary - 1]) - .velocity(); + if (ReturnVelocity) { + local_density = lbpar.density; + auto const i = lbfields[index].boundary - 1; + local_j = lbpar.density * LBBoundaries::lbboundaries[i]->velocity(); + } } else #endif { - auto const modes = lb_calc_modes(index, lbfluid); + auto const modes = lb_calc_modes(static_cast(index), lbfluid); local_density = lbpar.density + modes[0]; - // Add the +f/2 contribution!! - local_j[0] = modes[1] + f[0] / 2.; - local_j[1] = modes[2] + f[1] / 2.; - local_j[2] = modes[3] + f[2] / 2.; - - // Keep track of the forces that we added to the fluid. - // This is necessary for communication because this part is executed - // for real and ghost particles. - // Later on we sum the real and ghost contributions. - auto const f_ext = - lbpar.ext_force_density * Utils::sqr(lbpar.agrid * lbpar.tau); - forceAdded += local_delta * (f - f_ext) / (2. * local_density); + if (ReturnVelocity) { + // Add the +f/2 contribution!! + local_j[0] = modes[1] + f[0] / 2.; + local_j[1] = modes[2] + f[1] / 2.; + local_j[2] = modes[3] + f[2] / 2.; + } else { + // Keep track of the forces that we added to the fluid. + // This is necessary for communication because this part is executed + // for real and ghost particles. + // Later on we sum the real and ghost contributions. + force_added += local_delta * (f - f_ext) / (2. * local_density); + } } // Interpolate velocity - interpolated_u += local_delta * local_j / local_density; + if (ReturnVelocity) { + interpolated_u += local_j * (local_delta / local_density); + } } } } - v = interpolated_u * lbpar.agrid / lbpar.tau; + auto const unit_conversion = lbpar.agrid / lbpar.tau; + if (ReturnVelocity) { + return interpolated_u * unit_conversion; + } + return force_added * unit_conversion; } /** Build a cache structure which contains a flag for each LB node whether * that node is a halo node or not. */ -bool IsHalo(const int indexCheck) { +bool IsHalo(std::size_t indexCheck) { // First call --> build cache if (isHaloCache == nullptr) { isHaloCache = new bool[lblattice.halo_grid_volume]; @@ -228,7 +225,7 @@ bool IsHalo(const int indexCheck) { for (int i = 0; i < lblattice.halo_grid_volume; i++) isHaloCache[i] = true; // Loop through and check where indexCheck occurs - int index = lblattice.halo_offset; + auto index = lblattice.halo_offset; for (int z = 1; z <= lblattice.grid[2]; z++) { for (int y = 1; y <= lblattice.grid[1]; y++) { for (int x = 1; x <= lblattice.grid[0]; x++) { @@ -253,11 +250,10 @@ void ParticleVelocitiesFromLB_CPU() { // Here all contributions are included: velocity, external force and // particle force. for (auto &p : cell_structure.local_particles()) { - if (p.p.is_virtual) { + if (p.is_virtual()) { // Get interpolated velocity and store in the force (!) 
field // for later communication (see below) - Utils::Vector3d dummy; - GetIBMInterpolatedVelocity(p.r.p, p.f.f, dummy); + p.force() = GetIBMInterpolatedVelocity(p.pos()); } } @@ -267,22 +263,20 @@ void ParticleVelocitiesFromLB_CPU() { // This criterion include the halo on the left, but excludes the halo on // the right // Try if we have to use *1.5 on the right - if (in_local_domain(p.r.p)) { - if (p.p.is_virtual) { - Utils::Vector3d force{}; // The force stemming from the ghost particle - Utils::Vector3d dummy; - GetIBMInterpolatedVelocity(p.r.p, dummy, force); - - // Rescale and store in the force field of the particle (for + if (in_local_domain(p.pos())) { + if (p.is_virtual()) { + // The force stemming from the ghost particle (for // communication, see below) - p.f.f = force * lbpar.agrid / lbpar.tau; + p.force() = GetIBMInterpolatedVelocity(p.pos()); } else { - p.f.f = {}; - } // Reset, necessary because we add all forces below. Also needs to - // be done for the real particles! + // Reset, necessary because we add all forces below. Also needs to + // be done for real particles! + p.force() = {}; + } } else { - p.f.f = {}; - } // Reset, necessary because we add all forces below + // Reset, necessary because we add all forces below + p.force() = {}; + } } // Now the local particles contain a velocity (stored in the force field) @@ -299,8 +293,8 @@ void ParticleVelocitiesFromLB_CPU() { // Transfer to velocity field for (auto &p : cell_structure.local_particles()) { - if (p.p.is_virtual) { - p.m.v = p.f.f; + if (p.is_virtual()) { + p.v() = p.force(); } } } diff --git a/src/core/virtual_sites/lb_inertialess_tracers_cuda_interface.cpp b/src/core/virtual_sites/lb_inertialess_tracers_cuda_interface.cpp index b575e61eeef..5d5ea3b4f1f 100644 --- a/src/core/virtual_sites/lb_inertialess_tracers_cuda_interface.cpp +++ b/src/core/virtual_sites/lb_inertialess_tracers_cuda_interface.cpp @@ -46,17 +46,17 @@ static void pack_particles(ParticleRange const &particles, int i = 0; for (auto const &part : particles) { - auto const pos = folded_position(part.r.p, box_geo); + auto const pos = folded_position(part.pos(), box_geo); buffer[i].pos[0] = static_cast(pos[0]); buffer[i].pos[1] = static_cast(pos[1]); buffer[i].pos[2] = static_cast(pos[2]); - buffer[i].f[0] = static_cast(part.f.f[0]); - buffer[i].f[1] = static_cast(part.f.f[1]); - buffer[i].f[2] = static_cast(part.f.f[2]); + buffer[i].f[0] = static_cast(part.force()[0]); + buffer[i].f[1] = static_cast(part.force()[1]); + buffer[i].f[2] = static_cast(part.force()[2]); - buffer[i].is_virtual = part.p.is_virtual; + buffer[i].is_virtual = part.is_virtual(); i++; } @@ -89,9 +89,9 @@ static void set_velocities(ParticleRange const &particles, std::vector &buffer) { int i = 0; for (auto &part : particles) { - if (part.p.is_virtual) { + if (part.is_virtual()) { for (int j = 0; j < 3; j++) - part.m.v[j] = static_cast(buffer[i].v[j]); + part.v()[j] = static_cast(buffer[i].v[j]); } i++; } @@ -101,7 +101,7 @@ static void set_velocities(ParticleRange const &particles, * nodes. Analogous to @ref cuda_mpi_send_forces. 
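`GetIBMInterpolatedVelocity` is now parametrized by a compile-time flag `ReturnVelocity` that selects whether the interpolated velocity or the accumulated force correction is returned, replacing the two output arguments of the old signature. A minimal sketch of that pattern (generic example, not the actual LB code):

```cpp
#include <array>
#include <cassert>
#include <cstddef>

// Compile-time switch between two accumulation modes over the same loop,
// mirroring the ReturnVelocity template parameter (illustration only).
template <bool ReturnVelocity>
double accumulate(std::array<double, 3> const &weights,
                  std::array<double, 3> const &velocities,
                  std::array<double, 3> const &forces) {
  double interpolated_u = 0.;
  double force_added = 0.;
  for (std::size_t i = 0; i < weights.size(); ++i) {
    if (ReturnVelocity)
      interpolated_u += weights[i] * velocities[i];
    else
      force_added += weights[i] * forces[i];
  }
  return ReturnVelocity ? interpolated_u : force_added;
}

int main() {
  std::array<double, 3> const w{0.25, 0.25, 0.5}, v{1., 1., 1.}, f{2., 2., 2.};
  assert(accumulate<true>(w, v, f) == 1.);  // velocity mode
  assert(accumulate<false>(w, v, f) == 2.); // force bookkeeping mode
}
```

Since the flag is a template parameter, the branch not taken in each instantiation is dead code that the compiler can drop.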
*/ void IBM_cuda_mpi_send_velocities(ParticleRange const &particles) { - auto const n_part = particles.size(); + auto const n_part = static_cast(particles.size()); if (this_node > 0) { static std::vector buffer; diff --git a/src/particle_observables/include/particle_observables/algorithms.hpp b/src/particle_observables/include/particle_observables/algorithms.hpp index 6a18740f01e..21c0c63c094 100644 --- a/src/particle_observables/include/particle_observables/algorithms.hpp +++ b/src/particle_observables/include/particle_observables/algorithms.hpp @@ -16,17 +16,21 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef ALGORITHMS_HPP -#define ALGORITHMS_HPP +#ifndef SRC_PARTICLE_OBSERVABLES_ALGORITHMS_HPP +#define SRC_PARTICLE_OBSERVABLES_ALGORITHMS_HPP + +/** + * @file + * + * Generic algorithms for the calculation of particle + * property derived observables. + */ + #include #include #include #include -/** @file algorithms.hpp - * This file contains generic algorithms for the calculation of particle - * property derived observables. - */ namespace ParticleObservables { namespace detail { struct One { @@ -94,4 +98,4 @@ template struct Map { } }; } // namespace ParticleObservables -#endif // ALGORITHMS_HPP +#endif // SRC_PARTICLE_OBSERVABLES_ALGORITHMS_HPP diff --git a/src/particle_observables/include/particle_observables/observable.hpp b/src/particle_observables/include/particle_observables/observable.hpp index 7c928fcc2ef..2f5eb0d2e5f 100644 --- a/src/particle_observables/include/particle_observables/observable.hpp +++ b/src/particle_observables/include/particle_observables/observable.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
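The algorithms header above provides reductions over ranges of per-particle values (sums, weighted sums, averages and element-wise maps, as exercised by the updated tests further down). A rough standalone sketch of the weighted-average reduction, using a made-up free-function name rather than the actual `ParticleObservables` templates:

```cpp
#include <cassert>
#include <cmath>
#include <vector>

// Weighted mean of a range, sketching the WeightedAverage-style reduction
// (hypothetical free function, not the actual ParticleObservables template).
template <class Weight>
double weighted_average(std::vector<double> const &values, Weight weight) {
  double num = 0., den = 0.;
  for (auto const v : values) {
    num += weight(v) * v;
    den += weight(v);
  }
  return num / den;
}

int main() {
  std::vector<double> const values{1., 2., 3., 4.};
  // unit weights reduce to the plain average
  assert(weighted_average(values, [](double) { return 1.; }) == 2.5);
  // weight w(v) = v + 1: (1*2 + 2*3 + 3*4 + 4*5) / (2 + 3 + 4 + 5)
  assert(std::abs(weighted_average(values, [](double v) { return v + 1.; }) -
                  40. / 14.) < 1e-12);
}
```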
*/ -#ifndef INCLUDE_OBSERVABLES_OBSERVABLE_HPP -#define INCLUDE_OBSERVABLES_OBSERVABLE_HPP +#ifndef SRC_PARTICLE_OBSERVABLES_OBSERVABLE_HPP +#define SRC_PARTICLE_OBSERVABLES_OBSERVABLE_HPP #include "algorithms.hpp" #include "properties.hpp" @@ -54,4 +54,4 @@ using Positions = Map; using Velocities = Map; } // namespace ParticleObservables -#endif // INCLUDE_OBSERVABLES_OBSERVABLE_HPP +#endif // SRC_PARTICLE_OBSERVABLES_OBSERVABLE_HPP diff --git a/src/particle_observables/tests/algorithms.cpp b/src/particle_observables/tests/algorithms.cpp index 597265677c2..1418ccdf6aa 100644 --- a/src/particle_observables/tests/algorithms.cpp +++ b/src/particle_observables/tests/algorithms.cpp @@ -23,6 +23,8 @@ #include #include +#include +#include #include #include @@ -38,34 +40,61 @@ struct One { struct PlusOne { template auto operator()(Particle const &p) { return p + 1; } }; + +template +T average(std::vector const &value, std::size_t denominator) { + auto const sum = std::accumulate(value.begin(), value.end(), T{0}); + return static_cast(sum) / static_cast(denominator); +} + } // namespace Testing -BOOST_AUTO_TEST_CASE(algorithms) { - std::vector values{1, 2, 3, 4}; +BOOST_AUTO_TEST_CASE(algorithms_integer) { + std::vector const values{1, 2, 3, 4}; { auto const res = WeightedAverage()(values); - BOOST_CHECK(res == std::accumulate(values.begin(), values.end(), 0) / - values.size()); + BOOST_CHECK_EQUAL(res, Testing::average(values, values.size())); } { auto const res = WeightedAverage()(values); - BOOST_CHECK(res == (1 * 2 + 2 * 3 + 3 * 4 + 4 * 5) / 14); - auto const res2 = - WeightedSum()(values); - BOOST_CHECK(res2 == (1 * 2 + 2 * 3 + 3 * 4 + 4 * 5)); + BOOST_CHECK_EQUAL(res, (1 * 2 + 2 * 3 + 3 * 4 + 4 * 5) / 14); + } + { + auto const res = WeightedSum()(values); + BOOST_CHECK_EQUAL(res, (1 * 2 + 2 * 3 + 3 * 4 + 4 * 5)); + } + { + auto const res = Average()(values); + BOOST_CHECK_EQUAL(res, Testing::average(values, values.size())); + } + { + auto const res = Sum{}(values); + BOOST_CHECK_EQUAL(res, Testing::average(values, 1u)); + } + { + auto const res = Map{}(values); + BOOST_TEST(res == values); + } +} + +BOOST_AUTO_TEST_CASE(algorithms_double) { + auto constexpr tol = 100 * std::numeric_limits::epsilon(); + std::vector const values{1., 2., 3., 4.}; + { + auto const res = WeightedAverage()(values); + BOOST_CHECK_CLOSE(res, Testing::average(values, values.size()), tol); } { auto const res = Average()(values); - BOOST_CHECK(res == std::accumulate(values.begin(), values.end(), 0) / - values.size()); + BOOST_CHECK_EQUAL(res, Testing::average(values, values.size())); } { auto const res = Sum{}(values); - BOOST_CHECK(res == std::accumulate(values.begin(), values.end(), 0)); + BOOST_CHECK_EQUAL(res, Testing::average(values, 1u)); } { auto const res = Map{}(values); - BOOST_CHECK(res == values); + BOOST_TEST(res == values); } } diff --git a/src/python/espressomd/actors.pyx b/src/python/espressomd/actors.pyx index 31e63852ad2..13f34a636b3 100644 --- a/src/python/espressomd/actors.pyx +++ b/src/python/espressomd/actors.pyx @@ -17,6 +17,7 @@ include "myconfig.pxi" from .highlander import ThereCanOnlyBeOne from .utils import handle_errors +from . 
import utils cdef class Actor: @@ -45,20 +46,10 @@ cdef class Actor: def __init__(self, *args, **kwargs): self._isactive = False + utils.check_valid_keys(self.valid_keys(), kwargs.keys()) + utils.check_required_keys(self.required_keys(), kwargs.keys()) self._params = self.default_params() - - # Check if all required keys are given - for k in self.required_keys(): - if k not in kwargs: - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + self.required_keys().__str__() + " got " + kwargs.__str__()) - self._params[k] = kwargs[k] - - for k in kwargs: - if k in self.valid_keys(): - self._params[k] = kwargs[k] - else: - raise KeyError(f"{k} is not a valid key") + self._params.update(kwargs) def _activate(self): inter = self._get_interaction_type() @@ -107,18 +98,12 @@ cdef class Actor: def set_params(self, **p): """Update the given parameters.""" # Check if keys are valid - for k in p.keys(): - if k not in self.valid_keys(): - raise ValueError( - "Only the following keys are supported: " + self.valid_keys().__str__()) + utils.check_valid_keys(self.valid_keys(), p.keys()) # When an interaction is newly activated, all required keys must be # given if not self.is_active(): - for k in self.required_keys(): - if k not in p: - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + self.required_keys().__str__()) + utils.check_required_keys(self.required_keys(), p.keys()) self._params.update(p) # validate updated parameters diff --git a/src/python/espressomd/cellsystem.pxd b/src/python/espressomd/cellsystem.pxd index ecf56f006c8..48092b5e1fe 100644 --- a/src/python/espressomd/cellsystem.pxd +++ b/src/python/espressomd/cellsystem.pxd @@ -35,17 +35,19 @@ cdef extern from "cells.hpp": cdef extern from "communication.hpp": int n_nodes -cdef extern from "cells.hpp": - int CELL_STRUCTURE_DOMDEC - int CELL_STRUCTURE_NSQUARE +cdef extern from "CellStructureType.hpp": + ctypedef enum CellStructureType: + CELL_STRUCTURE_REGULAR "CellStructureType::CELL_STRUCTURE_REGULAR" + CELL_STRUCTURE_NSQUARE "CellStructureType::CELL_STRUCTURE_NSQUARE" +cdef extern from "cells.hpp": ctypedef struct CellStructure: - int decomposition_type() + CellStructureType decomposition_type() bool use_verlet_list CellStructure cell_structure - const DomainDecomposition * get_domain_decomposition() + const RegularDecomposition * get_regular_decomposition() vector[pair[int, int]] mpi_get_pairs(double distance) except + vector[pair[int, int]] mpi_get_pairs_of_types(double distance, vector[int] types) except + @@ -62,8 +64,8 @@ cdef extern from "integrate.hpp": void mpi_set_skin(double skin) double get_verlet_reuse() -cdef extern from "DomainDecomposition.hpp": - cppclass DomainDecomposition: +cdef extern from "RegularDecomposition.hpp": + cppclass RegularDecomposition: Vector3i cell_grid double cell_size[3] diff --git a/src/python/espressomd/cellsystem.pyx b/src/python/espressomd/cellsystem.pyx index 9291fb0acfd..3c023ea39f7 100644 --- a/src/python/espressomd/cellsystem.pyx +++ b/src/python/espressomd/cellsystem.pyx @@ -30,9 +30,9 @@ from .utils cimport Vector3i from .utils cimport check_type_or_throw_except, make_array_locked cdef class CellSystem: - def set_domain_decomposition(self, use_verlet_lists=True): + def set_regular_decomposition(self, use_verlet_lists=True): """ - Activates domain decomposition cell system. + Activates regular decomposition cell system. 
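The `ctypedef enum` above mirrors a scoped enum from the core header `CellStructureType.hpp`. A sketch of the assumed core-side shape and a hypothetical dispatch on it (enumerator values and the helper are illustrative only, not taken from the header):

```cpp
#include <cassert>
#include <string>

// Assumed shape of the core-side enum wrapped in cellsystem.pxd (sketch only).
enum class CellStructureType { CELL_STRUCTURE_REGULAR, CELL_STRUCTURE_NSQUARE };

// Hypothetical helper mapping the decomposition type to the name used by the
// Python checkpointing code.
std::string decomposition_name(CellStructureType type) {
  switch (type) {
  case CellStructureType::CELL_STRUCTURE_REGULAR:
    return "regular_decomposition";
  case CellStructureType::CELL_STRUCTURE_NSQUARE:
    return "nsquare";
  }
  return "unknown";
}

int main() {
  assert(decomposition_name(CellStructureType::CELL_STRUCTURE_NSQUARE) ==
         "nsquare");
}
```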
Parameters ---------- @@ -42,7 +42,7 @@ cdef class CellSystem: """ mpi_set_use_verlet_lists(use_verlet_lists) - mpi_bcast_cell_structure(CELL_STRUCTURE_DOMDEC) + mpi_bcast_cell_structure(CellStructureType.CELL_STRUCTURE_REGULAR) handle_errors("Error while initializing the cell system.") return True @@ -59,19 +59,19 @@ cdef class CellSystem: """ mpi_set_use_verlet_lists(use_verlet_lists) - mpi_bcast_cell_structure(CELL_STRUCTURE_NSQUARE) + mpi_bcast_cell_structure(CellStructureType.CELL_STRUCTURE_NSQUARE) return True def get_state(self): s = self.__getstate__() - if cell_structure.decomposition_type() == CELL_STRUCTURE_DOMDEC: - dd = get_domain_decomposition() + if cell_structure.decomposition_type() == CellStructureType.CELL_STRUCTURE_REGULAR: + rd = get_regular_decomposition() s["cell_grid"] = np.array( - [dd.cell_grid[0], dd.cell_grid[1], dd.cell_grid[2]]) + [rd.cell_grid[0], rd.cell_grid[1], rd.cell_grid[2]]) s["cell_size"] = np.array( - [dd.cell_size[0], dd.cell_size[1], dd.cell_size[2]]) + [rd.cell_size[0], rd.cell_size[1], rd.cell_size[2]]) s["verlet_reuse"] = get_verlet_reuse() s["n_nodes"] = n_nodes @@ -81,9 +81,9 @@ cdef class CellSystem: def __getstate__(self): s = {"use_verlet_list": cell_structure.use_verlet_list} - if cell_structure.decomposition_type() == CELL_STRUCTURE_DOMDEC: - s["type"] = "domain_decomposition" - if cell_structure.decomposition_type() == CELL_STRUCTURE_NSQUARE: + if cell_structure.decomposition_type() == CellStructureType.CELL_STRUCTURE_REGULAR: + s["type"] = "regular_decomposition" + if cell_structure.decomposition_type() == CellStructureType.CELL_STRUCTURE_NSQUARE: s["type"] = "nsquare" s["skin"] = skin @@ -94,8 +94,8 @@ cdef class CellSystem: self.skin = d['skin'] self.node_grid = d['node_grid'] if 'type' in d: - if d['type'] == "domain_decomposition": - self.set_domain_decomposition( + if d['type'] == "regular_decomposition": + self.set_regular_decomposition( use_verlet_lists=d['use_verlet_list']) elif d['type'] == "nsquare": self.set_n_square(use_verlet_lists=d['use_verlet_list']) diff --git a/src/python/espressomd/electrokinetics.pyx b/src/python/espressomd/electrokinetics.pyx index b65d252fef9..7422bfd9ff5 100644 --- a/src/python/espressomd/electrokinetics.pyx +++ b/src/python/espressomd/electrokinetics.pyx @@ -394,34 +394,24 @@ IF ELECTROKINETICS: def __init__(self, **kwargs): Species.py_number_of_species += 1 self.id = Species.py_number_of_species + utils.check_required_keys(self.required_keys(), kwargs.keys()) + utils.check_valid_keys(self.valid_keys(), kwargs.keys()) self._params = self.default_params() - - # Check if all required keys are given - for k in self.required_keys(): - if k not in kwargs: - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + self.required_keys().__str__() + " got " + kwargs.__str__()) - self._params[k] = kwargs[k] - - for k in kwargs: - if k in self.valid_keys(): - self._params[k] = kwargs[k] - else: - raise KeyError(f"{k} is not a valid key") + self._params.update(kwargs) def valid_keys(self): """ Returns the valid keys for the species. """ - return "density", "D", "valency", "ext_force_density" + return {"density", "D", "valency", "ext_force_density"} def required_keys(self): """ Returns the required keys for the species. 
""" - return ["density", "D", "valency"] + return {"density", "D", "valency"} def default_params(self): """ diff --git a/src/python/espressomd/electrostatics.pyx b/src/python/espressomd/electrostatics.pyx index 269bd511ddf..50e4e80ccf0 100644 --- a/src/python/espressomd/electrostatics.pyx +++ b/src/python/espressomd/electrostatics.pyx @@ -25,6 +25,7 @@ import numpy as np IF SCAFACOS == 1: from .scafacos import ScafacosConnector from . cimport scafacos +from . import utils from .utils import is_valid_type, check_type_or_throw_except, handle_errors from .analyze cimport partCfg, PartCfg from .particle_data cimport particle @@ -64,13 +65,9 @@ IF ELECTROSTATICS == 1: deactivate_method() handle_errors("Coulomb method deactivation") - def tune(self, **tune_params_subset): - if tune_params_subset is not None: - if all(k in self.valid_keys() for k in tune_params_subset): - self._params.update(tune_params_subset) - else: - raise ValueError( - "Invalid parameter given to tune function.") + def tune(self, **tune_params): + utils.check_valid_keys(self.valid_keys(), tune_params.keys()) + self._params.update(tune_params) self._tune() @@ -95,10 +92,10 @@ IF ELECTROSTATICS: pass def valid_keys(self): - return ["prefactor", "kappa", "r_cut", "check_neutrality"] + return {"prefactor", "kappa", "r_cut", "check_neutrality"} def required_keys(self): - return ["prefactor", "kappa", "r_cut"] + return {"prefactor", "kappa", "r_cut"} def _set_params_in_es_core(self): set_prefactor(self._params["prefactor"]) @@ -144,11 +141,11 @@ IF ELECTROSTATICS: pass def valid_keys(self): - return ["prefactor", "kappa", "epsilon1", "epsilon2", "r_cut", - "check_neutrality"] + return {"prefactor", "kappa", "epsilon1", "epsilon2", "r_cut", + "check_neutrality"} def required_keys(self): - return ["prefactor", "kappa", "epsilon1", "epsilon2", "r_cut"] + return {"prefactor", "kappa", "epsilon1", "epsilon2", "r_cut"} def _set_params_in_es_core(self): set_prefactor(self._params["prefactor"]) @@ -190,12 +187,12 @@ IF P3M == 1: mesh[i] = pmesh[i] def valid_keys(self): - return ["mesh", "cao", "accuracy", "epsilon", "alpha", "r_cut", + return {"mesh", "cao", "accuracy", "epsilon", "alpha", "r_cut", "prefactor", "tune", "check_neutrality", "timings", - "verbose", "mesh_off"] + "verbose", "mesh_off"} def required_keys(self): - return ["prefactor", "accuracy"] + return {"prefactor", "accuracy"} def default_params(self): return {"cao": 0, @@ -477,12 +474,12 @@ IF P3M == 1: "neutralize has to be a bool") def valid_keys(self): - return ["p3m_actor", "maxPWerror", "gap_size", "far_cut", + return {"p3m_actor", "maxPWerror", "gap_size", "far_cut", "neutralize", "delta_mid_top", "delta_mid_bot", - "const_pot", "pot_diff", "check_neutrality"] + "const_pot", "pot_diff", "check_neutrality"} def required_keys(self): - return ["p3m_actor", "maxPWerror", "gap_size"] + return {"p3m_actor", "maxPWerror", "gap_size"} def default_params(self): return {"maxPWerror": -1, @@ -576,12 +573,12 @@ IF ELECTROSTATICS: "verbose": True} def valid_keys(self): - return ["prefactor", "maxPWerror", "far_switch_radius", + return {"prefactor", "maxPWerror", "far_switch_radius", "bessel_cutoff", "tune", "check_neutrality", "timings", - "verbose"] + "verbose"} def required_keys(self): - return ["prefactor", "maxPWerror"] + return {"prefactor", "maxPWerror"} def _get_params_from_es_core(self): params = {} @@ -662,11 +659,11 @@ IF ELECTROSTATICS and MMM1D_GPU: "check_neutrality": True} def valid_keys(self): - return ["prefactor", "maxPWerror", "far_switch_radius", - 
"bessel_cutoff", "tune", "check_neutrality"] + return {"prefactor", "maxPWerror", "far_switch_radius", + "bessel_cutoff", "tune", "check_neutrality"} def required_keys(self): - return ["prefactor", "maxPWerror"] + return {"prefactor", "maxPWerror"} def _get_params_from_es_core(self): params = {} diff --git a/src/python/espressomd/integrate.pyx b/src/python/espressomd/integrate.pyx index 96d7a0db2b2..9851626ab2f 100644 --- a/src/python/espressomd/integrate.pyx +++ b/src/python/espressomd/integrate.pyx @@ -20,6 +20,7 @@ from cpython.exc cimport PyErr_CheckSignals, PyErr_SetInterrupt include "myconfig.pxi" from .utils cimport check_type_or_throw_except from .utils import handle_errors +from . import utils from . cimport integrate cdef class IntegratorHandle: @@ -158,13 +159,7 @@ cdef class Integrator: return self.__getstate__() def __init__(self, *args, **kwargs): - - # Check if all required keys are given - for k in self.required_keys(): - if k not in kwargs: - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + self.required_keys().__str__()) - + utils.check_required_keys(self.required_keys(), kwargs.keys()) self._params = self.default_params() self._params.update(kwargs) self.validate_params() @@ -322,13 +317,13 @@ cdef class VelocityVerlet(Integrator): """All parameters that can be set. """ - return {} + return set() def required_keys(self): """Parameters that have to be set. """ - return {} + return set() def validate_params(self): return True @@ -411,13 +406,13 @@ cdef class BrownianDynamics(Integrator): """All parameters that can be set. """ - return {} + return set() def required_keys(self): """Parameters that have to be set. """ - return {} + return set() def validate_params(self): return True diff --git a/src/python/espressomd/interactions.pyx b/src/python/espressomd/interactions.pyx index dc7925064b5..9b82e08cd61 100644 --- a/src/python/espressomd/interactions.pyx +++ b/src/python/espressomd/interactions.pyx @@ -22,6 +22,7 @@ cimport cpython.object import collections include "myconfig.pxi" +from . 
import utils from .utils import is_valid_type from .utils cimport check_type_or_throw_except from .script_interface import ScriptObjectRegistry, ScriptInterfaceHelper, script_interface_register @@ -56,16 +57,11 @@ cdef class NonBondedInteraction: # Or have we been called with keyword args describing the interaction elif len(args) == 0: + utils.check_required_keys(self.required_keys(), kwargs.keys()) + utils.check_valid_keys(self.valid_keys(), kwargs.keys()) # Initialize default values self._params = self.default_params() self._part_types = [-1, -1] - - # Check if all required keys are given - for k in self.required_keys(): - if k not in kwargs: - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + self.required_keys().__str__()) - self._params.update(kwargs) self.validate_params() else: @@ -111,18 +107,12 @@ cdef class NonBondedInteraction: """ # Check, if any key was passed, which is not known - for k in p.keys(): - if k not in self.valid_keys(): - raise ValueError( - "Only the following keys are supported: " + self.valid_keys().__str__()) + utils.check_valid_keys(self.valid_keys(), p.keys()) # When an interaction is newly activated, all required keys must be # given if not self.is_active(): - for k in self.required_keys(): - if k not in p: - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + self.required_keys().__str__()) + utils.check_required_keys(self.required_keys(), p.keys()) # If this instance refers to an interaction defined in the ESPResSo core, # load the parameters from there @@ -1696,7 +1686,7 @@ class BondedInteraction(ScriptInterfaceHelper): params.update(kwargs) self.validate_params(params) super().__init__(*args, **params) - self._check_keys(params.keys(), check_required=True) + utils.check_valid_keys(self.valid_keys(), kwargs.keys()) self._ctor_params = params self._bond_id = -1 else: @@ -1706,21 +1696,6 @@ class BondedInteraction(ScriptInterfaceHelper): self._bond_id = -1 self._ctor_params = self._get_params_from_es_core() - def _check_keys(self, keys, check_required=False): - def err_msg(key_set): - return f'{{{", ".join(key_set)}}}' - - if check_required: - for required_key in self.required_keys(): - if required_key not in keys: - raise ValueError( - f"At least the following keys have to be given as keyword arguments: {err_msg(self.required_keys())}") - - for key in keys: - if key not in self.valid_keys(): - raise ValueError( - f"Key '{key}' invalid! Only the following keys are supported: {err_msg(self.valid_keys())}") - def __reduce__(self): if self._bond_id != -1: # checkpointing constructor #1 @@ -2663,7 +2638,8 @@ class IBM_Tribend(BondedInteraction): return {"refShape": "Flat"} def _get_params_from_es_core(self): - return {"kb": self.kb, "theta0": self.theta0} + return {"kb": self.kb, "theta0": self.theta0, + "refShape": self.refShape} @script_interface_register diff --git a/src/python/espressomd/io/mpiio.py b/src/python/espressomd/io/mpiio.py index e89ac28920d..bf617dcf1a1 100644 --- a/src/python/espressomd/io/mpiio.py +++ b/src/python/espressomd/io/mpiio.py @@ -14,22 +14,18 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from ..script_interface import PScriptInterface +from ..script_interface import ScriptInterfaceHelper, script_interface_register -class Mpiio: +@script_interface_register +class Mpiio(ScriptInterfaceHelper): """MPI-IO object. Used to output particle data using MPI-IO to binary files. - - .. 
note:: - See the :meth:`write` and :meth:`read` methods for documentation. """ - - def __init__(self): - self._instance = PScriptInterface( - "ScriptInterface::MPIIO::MPIIOScript") + _so_name = "ScriptInterface::MPIIO::MPIIOScript" + _so_creation_policy = "GLOBAL" def write(self, prefix=None, positions=False, velocities=False, types=False, bonds=False): @@ -75,7 +71,7 @@ def write(self, prefix=None, positions=False, velocities=False, if not positions and not velocities and not types and not bonds: raise ValueError("No output fields chosen.") - self._instance.call_method( + self.call_method( "write", prefix=prefix, pos=positions, vel=velocities, typ=types, bond=bonds) def read(self, prefix=None, positions=False, velocities=False, @@ -96,8 +92,5 @@ def read(self, prefix=None, positions=False, velocities=False, if not positions and not velocities and not types and not bonds: raise ValueError("No output fields chosen.") - self._instance.call_method( + self.call_method( "read", prefix=prefix, pos=positions, vel=velocities, typ=types, bond=bonds) - - -mpiio = Mpiio() diff --git a/src/python/espressomd/magnetostatics.pyx b/src/python/espressomd/magnetostatics.pyx index 3dc0571a3bf..c3dfea76264 100644 --- a/src/python/espressomd/magnetostatics.pyx +++ b/src/python/espressomd/magnetostatics.pyx @@ -131,12 +131,12 @@ IF DP3M == 1: raise ValueError("DipolarP3M timings must be > 0") def valid_keys(self): - return ["prefactor", "alpha_L", "r_cut_iL", "mesh", "mesh_off", + return {"prefactor", "alpha_L", "r_cut_iL", "mesh", "mesh_off", "cao", "accuracy", "epsilon", "cao_cut", "a", "ai", - "alpha", "r_cut", "cao3", "tune", "timings", "verbose"] + "alpha", "r_cut", "cao3", "tune", "timings", "verbose"} def required_keys(self): - return ["accuracy", ] + return {"accuracy"} def default_params(self): return {"cao": -1, @@ -218,10 +218,10 @@ IF DIPOLES == 1: return {} def required_keys(self): - return () + return set() def valid_keys(self): - return ("prefactor",) + return {"prefactor"} def _get_params_from_es_core(self): return {"prefactor": self.get_magnetostatics_prefactor()} @@ -256,10 +256,10 @@ IF DIPOLES == 1: return {} def required_keys(self): - return ("n_replica",) + return {"n_replica"} def valid_keys(self): - return ("prefactor", "n_replica") + return {"prefactor", "n_replica"} def _get_params_from_es_core(self): return {"prefactor": self.get_magnetostatics_prefactor(), @@ -341,10 +341,10 @@ IF DIPOLES == 1: return {} def required_keys(self): - return () + return set() def valid_keys(self): - return ("prefactor",) + return {"prefactor"} def _get_params_from_es_core(self): return {"prefactor": self.get_magnetostatics_prefactor()} @@ -382,10 +382,10 @@ IF DIPOLES == 1: "itolsq": 4.0} def required_keys(self): - return () + return set() def valid_keys(self): - return ("prefactor", "epssq", "itolsq") + return {"prefactor", "epssq", "itolsq"} def _get_params_from_es_core(self): return {"prefactor": self.get_magnetostatics_prefactor()} diff --git a/src/python/espressomd/polymer.pyx b/src/python/espressomd/polymer.pyx index 40e5ae68536..93c912554b4 100644 --- a/src/python/espressomd/polymer.pyx +++ b/src/python/espressomd/polymer.pyx @@ -19,6 +19,7 @@ include "myconfig.pxi" from . cimport polymer +from . 
import utils import numpy as np from .system import System from .interactions import BondedInteraction @@ -130,17 +131,9 @@ def linear_polymer_positions(**kwargs): required_keys = ["n_polymers", "beads_per_chain", "bond_length", "seed"] - for k in kwargs: - if k not in valid_keys: - raise ValueError(f"Unknown parameter '{k}'") - params[k] = kwargs[k] - - for k in required_keys: - if k not in kwargs: - print(k) - raise ValueError( - "At least the following keys have to be given as keyword arguments: " + required_keys.__str__()) - + utils.check_required_keys(required_keys, kwargs.keys()) + utils.check_valid_keys(valid_keys, kwargs.keys()) + params.update(kwargs) validate_params(params, default_params) cdef vector[Vector3d] start_positions diff --git a/src/python/espressomd/reaction_ensemble.pxd b/src/python/espressomd/reaction_ensemble.pxd deleted file mode 100644 index 4a1261a468e..00000000000 --- a/src/python/espressomd/reaction_ensemble.pxd +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2010-2019 The ESPResSo project -# -# This file is part of ESPResSo. -# -# ESPResSo is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ESPResSo is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -include "myconfig.pxi" - -from libcpp cimport bool -from libcpp.vector cimport vector -from libcpp.pair cimport pair -from libcpp.string cimport string -from libcpp.map cimport map -from .utils cimport Vector2d - -cdef extern from "reaction_methods/SingleReaction.hpp" namespace "ReactionMethods": - - ctypedef struct SingleReaction: - vector[int] reactant_types - vector[int] reactant_coefficients - vector[int] product_types - vector[int] product_coefficients - double gamma - int nu_bar - double get_acceptance_rate() - -cdef extern from "reaction_methods/ReactionAlgorithm.hpp" namespace "ReactionMethods": - - cdef cppclass CReactionAlgorithm "ReactionMethods::ReactionAlgorithm": - int CReactionAlgorithm(int seed) - int do_reaction(int reaction_steps) except + - bool do_global_mc_move_for_particles_of_type(int type, int particle_number_of_type) - void set_cuboid_reaction_ensemble_volume() - int check_reaction_method() except + - double get_acceptance_rate_configurational_moves() - int delete_particle(int p_id) - void add_reaction(double gamma, vector[int] reactant_types, vector[int] reactant_coefficients, vector[int] product_types, vector[int] product_coefficients) except + - void delete_reaction(int reaction_id) - void set_cyl_constraint(double center_x, double center_y, double radius) except + - void set_slab_constraint(double slab_start_z, double slab_end_z) except + - void remove_constraint() - Vector2d get_slab_constraint_parameters() - - vector[SingleReaction] reactions - map[int, double] charges_of_types - double kT - double exclusion_radius - double volume - int non_interacting_type - -cdef extern from "reaction_methods/ReactionEnsemble.hpp" namespace "ReactionMethods": - - cdef cppclass CReactionEnsemble "ReactionMethods::ReactionEnsemble"(CReactionAlgorithm): - CReactionEnsemble(int seed) - -cdef extern from 
"reaction_methods/ConstantpHEnsemble.hpp" namespace "ReactionMethods": - - cdef cppclass CConstantpHEnsemble "ReactionMethods::ConstantpHEnsemble"(CReactionAlgorithm): - CConstantpHEnsemble(int seed) - double m_constant_pH - -cdef extern from "reaction_methods/WidomInsertion.hpp" namespace "ReactionMethods": - - cdef cppclass CWidomInsertion "ReactionMethods::WidomInsertion"(CReactionAlgorithm): - CWidomInsertion(int seed) - double calculate_particle_insertion_potential_energy(SingleReaction & current_reaction) except + diff --git a/src/python/espressomd/reaction_ensemble.py b/src/python/espressomd/reaction_ensemble.py new file mode 100644 index 00000000000..1f694b905b5 --- /dev/null +++ b/src/python/espressomd/reaction_ensemble.py @@ -0,0 +1,547 @@ +# Copyright (C) 2010-2019 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import numpy as np +import warnings +from .script_interface import ScriptInterfaceHelper, script_interface_register +from . import utils + + +@script_interface_register +class SingleReaction(ScriptInterfaceHelper): + _so_name = "ReactionMethods::SingleReaction" + _so_creation_policy = "LOCAL" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + if not 'sip' in kwargs: + utils.check_valid_keys(self.valid_keys(), kwargs.keys()) + + def valid_keys(self): + return ("reactant_types", "reactant_coefficients", + "product_types", "product_coefficients", "gamma") + + def required_keys(self): + return ("reactant_types", "reactant_coefficients", + "product_types", "product_coefficients", "gamma") + + def make_backward_reaction(self): + return SingleReaction( + gamma=1. / self.gamma, reactant_types=self.product_types, + reactant_coefficients=self.product_coefficients, + product_types=self.reactant_types, + product_coefficients=self.reactant_coefficients) + + +@script_interface_register +class ReactionAlgorithm(ScriptInterfaceHelper): + """ + + This class provides the base class for Reaction Algorithms like + the Reaction Ensemble algorithm and the constant pH method. + Initialize the reaction algorithm by setting the + standard pressure, temperature, and the exclusion radius. + + Note: When creating particles the velocities of the new particles are set + according the Maxwell-Boltzmann distribution. In this step the mass of the + new particle is assumed to equal 1. + + + Parameters + ---------- + kT : :obj:`float` + Thermal energy of the system in simulation units + exclusion_radius : :obj:`float` + Minimal distance from any particle, within which new particle will not + be inserted. This is useful to avoid integrator failures if particles + are too close and there is a diverging repulsive interaction, or to + prevent two oppositely charged particles from being placed on top of + each other. The Boltzmann factor :math:`\\exp(-\\beta E)` gives these + configurations a small contribution to the partition function, + therefore they can be neglected. 
+ seed : :obj:`int` + Initial counter value (or seed) of the Mersenne Twister RNG. + + Methods + ------- + remove_constraint() + Remove any previously defined constraint. + Requires setting the volume using :meth:`set_volume`. + + set_cylindrical_constraint_in_z_direction() + Constrain the reaction moves within a cylinder aligned on the z-axis. + Requires setting the volume using :meth:`set_volume`. + + Parameters + ---------- + center_x : :obj:`float` + x coordinate of center of the cylinder. + center_y : :obj:`float` + y coordinate of center of the cylinder. + radius_of_cylinder : :obj:`float` + radius of the cylinder + + set_wall_constraints_in_z_direction() + Restrict the sampling area to a slab in z-direction. Requires setting + the volume using :meth:`set_volume`. This constraint is necessary when + working with :ref:`Electrostatic Layer Correction (ELC)`. + + Parameters + ---------- + slab_start_z : :obj:`float` + z coordinate of the bottom wall. + slab_end_z : :obj:`float` + z coordinate of the top wall. + + Examples + -------- + + >>> import espressomd + >>> import espressomd.shapes + >>> import espressomd.electrostatics + >>> import espressomd.reaction_ensemble + >>> import numpy as np + >>> # setup a charged system + >>> box_l = 20 + >>> elc_gap = 10 + >>> system = espressomd.System(box_l=[box_l, box_l, box_l + elc_gap]) + >>> system.time_step = 0.001 + >>> system.cell_system.skin = 0.4 + >>> types = {"HA": 0, "A-": 1, "H+": 2, "wall": 3} + >>> charges = {types["HA"]: 0, types["A-"]: -1, types["H+"]: +1} + >>> for i in range(10): + ... system.part.add(pos=np.random.random(3) * box_l, type=types["A-"], q=charges[types["A-"]]) + ... system.part.add(pos=np.random.random(3) * box_l, type=types["H+"], q=charges[types["H+"]]) + >>> for particle_type in charges.keys(): + ... system.non_bonded_inter[particle_type, types["wall"]].wca.set_params(epsilon=1.0, sigma=1.0) + >>> # add ELC actor + >>> p3m = espressomd.electrostatics.P3M(prefactor=1.0, accuracy=1e-2) + >>> elc = espressomd.electrostatics.ELC(p3m_actor=p3m, maxPWerror=1.0, gap_size=elc_gap) + >>> system.actors.add(elc) + >>> # add constant pH method + >>> RE = espressomd.reaction_ensemble.ConstantpHEnsemble(kT=1, exclusion_radius=1, seed=77) + >>> RE.constant_pH = 2 + >>> RE.add_reaction(gamma=0.0088, reactant_types=[types["HA"]], + ... product_types=[types["A-"], types["H+"]], + ... default_charges=charges) + >>> # add walls for the ELC gap + >>> RE.set_wall_constraints_in_z_direction(0, box_l) + >>> RE.set_volume(box_l**3) + >>> system.constraints.add(shape=espressomd.shapes.Wall(dist=0, normal=[0, 0, 1]), + ... particle_type=types["wall"]) + >>> system.constraints.add(shape=espressomd.shapes.Wall(dist=-box_l, normal=[0, 0, -1]), + ... particle_type=types["wall"]) + + get_wall_constraints_in_z_direction() + Returns the restrictions of the sampling area in z-direction. + + set_volume() + Set the volume to be used in the acceptance probability of the reaction + ensemble. This can be useful when using constraints, if the relevant + volume is different from the box volume. If not used the default volume + which is used, is the box volume. + + Parameters + ---------- + volume : :obj:`float` + Volume of the system in simulation units + + get_volume() + Get the volume to be used in the acceptance probability of the reaction + ensemble. + + get_acceptance_rate_configurational_moves(): + Returns the acceptance rate for the configuration moves. + + get_acceptance_rate_reaction() + Returns the acceptance rate for the given reaction. 
+ + Parameters + ---------- + reaction_id : :obj:`int` + Reaction id + + set_non_interacting_type() + Sets the particle type for non-interacting particles. + Default value: 100. + This is used to temporarily hide particles during a reaction trial + move, if they are to be deleted after the move is accepted. Please + change this value if you intend to use the type 100 for some other + particle types with interactions, or if you need improved performance, + as the default value of 100 causes some overhead. + Please also note that particles + in the current implementation of the Reaction Ensemble are only + hidden with respect to Lennard-Jones and Coulomb interactions. Hiding + of other interactions, for example a magnetic, needs to be implemented + in the code. + + Parameters + ---------- + type : :obj:`int` + Particle type for the hidden particles + + get_non_interacting_type() + Returns the type which is used for hiding particle + + reaction() + Performs randomly selected reactions. + + Parameters + ---------- + reaction_steps : :obj:`int`, optional + The number of reactions to be performed at once, defaults to 1. + + displacement_mc_move_for_particles_of_type() + Performs a displacement Monte Carlo move for particles of given type. + New positions of the displaced particles are chosen from the whole box + with a uniform probability distribution. If there are multiple types, + that are being moved in a simulation, they should be moved in a random + order to avoid artefacts. + + Parameters + ---------- + type_mc : :obj:`int` + Particle type which should be moved + particle_number_to_be_changed : :obj:`int` + Number of particles to move, defaults to 1. + + delete_particle() + Deletes the particle of the given p_id and makes sure that the particle + range has no holes. This function has some restrictions, as e.g. bonds + are not deleted. Therefore only apply this function to simple ions. + + Parameters + ---------- + p_id : :obj:`int` + Id of the particle to be deleted. + + change_reaction_constant() + Changes the reaction constant of a given reaction + (for both the forward and backward reactions). + The ``reaction_id`` which is assigned to a reaction + depends on the order in which :meth:`add_reaction` was called. + The 0th reaction has ``reaction_id=0``, the next added + reaction needs to be addressed with ``reaction_id=1``, etc. + + Parameters + ---------- + reaction_id : :obj:`int` + Reaction id + gamma : :obj:`float` + New reaction constant + + delete_reaction() + Delete a reaction from the set of used reactions + (the forward and backward reaction). + The ``reaction_id`` which is assigned to a reaction + depends on the order in which :meth:`add_reaction` was called. + The 0th reaction has ``reaction_id=0``, the next added + reaction needs to be addressed with ``reaction_id=1``, etc. + After the deletion of a reaction subsequent reactions + take the ``reaction_id`` of the deleted reaction. 
+ + Parameters + ---------- + reaction_id : :obj:`int` + Reaction id + + """ + + _so_name = "ReactionMethods::ReactionAlgorithm" + _so_creation_policy = "LOCAL" + _so_bind_methods = ("remove_constraint", + "get_wall_constraints_in_z_direction", + "set_wall_constraints_in_z_direction", + "set_cylindrical_constraint_in_z_direction", + "set_volume", + "get_volume", + "get_acceptance_rate_reaction", + "set_non_interacting_type", + "get_non_interacting_type", + "reaction", + "displacement_mc_move_for_particles_of_type", + "check_reaction_method", + "change_reaction_constant", + "delete_reaction", + "delete_particle", + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + if not 'sip' in kwargs: + utils.check_valid_keys(self.valid_keys(), kwargs.keys()) + + def valid_keys(self): + return {"kT", "exclusion_radius", "seed"} + + def required_keys(self): + return {"kT", "exclusion_radius", "seed"} + + def add_reaction(self, **kwargs): + """ + Sets up a reaction in the forward and backward direction. + + Parameters + ---------- + gamma : :obj:`float` + Equilibrium constant :math:`\\Gamma` of the reaction in simulation + units (see section :ref:`Reaction Ensemble` for its definition). + reactant_types : list of :obj:`int` + List of particle types of reactants in the reaction. + reactant_coefficients : list of :obj:`int` + List of stoichiometric coefficients of the reactants in the same + order as the list of their types. + product_types : list of :obj:`int` + List of particle types of products in the reaction. + product_coefficients : list of :obj:`int` + List of stoichiometric coefficients of products of the reaction in + the same order as the list of their types + default_charges : :obj:`dict` + A dictionary of default charges for types that occur + in the provided reaction. + check_for_electroneutrality : :obj:`bool` + Check for electroneutrality of the given reaction. + Default is ``True``. 
+ + """ + default_charges = kwargs.pop("default_charges") + neutrality_check = kwargs.pop("check_for_electroneutrality", True) + forward_reaction = SingleReaction(**kwargs) + backward_reaction = forward_reaction.make_backward_reaction() + if neutrality_check: + self._check_charge_neutrality( + type2charge=default_charges, + reaction=forward_reaction) + + self.call_method("add_reaction", reaction=forward_reaction) + self.call_method("add_reaction", reaction=backward_reaction) + + for ptype, charge in default_charges.items(): + self.call_method("set_charge_of_type", type=ptype, charge=charge) + self.call_method("check_reaction_method") + + def _check_charge_neutrality(self, type2charge, reaction): + if not isinstance(type2charge, dict): + raise ValueError( + "No dictionary for relation between types and default charges provided.") + charges = np.array(list(type2charge.values())) + if np.count_nonzero(charges) == 0: + # all particles have zero charge + # no need to check electroneutrality + return + # calculate net change of electrical charge for the reaction + net_charge_change = 0.0 + for coef, ptype in zip( + reaction.reactant_coefficients, reaction.reactant_types): + net_charge_change -= coef * type2charge[ptype] + for coef, ptype in zip( + reaction.product_coefficients, reaction.product_types): + net_charge_change += coef * type2charge[ptype] + min_abs_nonzero_charge = np.min( + np.abs(charges[np.nonzero(charges)[0]])) + if abs(net_charge_change) / min_abs_nonzero_charge > 1e-10: + raise ValueError("Reaction system is not charge neutral") + + def get_status(self): + """ + Returns the status of the reaction ensemble in a dictionary containing + the used reactions, the used kT and the used exclusion radius. + + """ + + self.call_method("check_reaction_method") + reactions_list = [] + + for core_reaction in self.reactions: + reaction = {"reactant_coefficients": core_reaction.reactant_coefficients, + "reactant_types": core_reaction.reactant_types, + "product_types": core_reaction.product_types, + "product_coefficients": core_reaction.product_coefficients, + "reactant_types": core_reaction.reactant_types, + "gamma": core_reaction.gamma} + reactions_list.append(reaction) + + return {"reactions": reactions_list, "kT": self.kT, + "exclusion_radius": self.exclusion_radius} + + +@script_interface_register +class ReactionEnsemble(ReactionAlgorithm): + """ + This class implements the Reaction Ensemble. + """ + + _so_name = "ReactionMethods::ReactionEnsemble" + _so_creation_policy = "LOCAL" + + +@script_interface_register +class ConstantpHEnsemble(ReactionAlgorithm): + """ + This class implements the constant pH Ensemble. + + When adding an acid-base reaction, the acid and base particle types + are always assumed to be at index 0 of the lists passed to arguments + ``reactant_types`` and ``product_types``. + + Attributes + ---------- + constant_pH : :obj:`float` + Constant pH value. + + """ + _so_name = "ReactionMethods::ConstantpHEnsemble" + _so_creation_policy = "LOCAL" + + def valid_keys(self): + return {"kT", "exclusion_radius", "seed", "constant_pH"} + + def required_keys(self): + return {"kT", "exclusion_radius", "seed", "constant_pH"} + + def add_reaction(self, *args, **kwargs): + warn_msg = ( + "arguments 'reactant_coefficients' and 'product_coefficients' " + "are deprecated and are no longer necessary for the constant pH " + "ensemble. 
They are kept for backward compatibility but might " + "be deleted in future versions.") + err_msg = ("All product and reactant coefficients must equal one in " + "the constant pH method as implemented in ESPResSo.") + warn_user = False + + if "reactant_coefficients" in kwargs: + if kwargs["reactant_coefficients"][0] != 1: + raise ValueError(err_msg) + else: + warn_user = True + else: + kwargs["reactant_coefficients"] = [1] + + if "product_coefficients" in kwargs: + if kwargs["product_coefficients"][0] != 1 or kwargs["product_coefficients"][1] != 1: + raise ValueError(err_msg) + else: + warn_user = True + else: + kwargs["product_coefficients"] = [1, 1] + + if warn_user: + warnings.warn(warn_msg, FutureWarning) + + if(len(kwargs["product_types"]) != 2 or len(kwargs["reactant_types"]) != 1): + raise ValueError( + "The constant pH method is only implemented for reactions " + "with two product types and one adduct type.") + + super().add_reaction(*args, **kwargs) + + +@script_interface_register +class WidomInsertion(ReactionAlgorithm): + """ + This class implements the Widom insertion method in the canonical ensemble + for homogeneous systems, where the excess chemical potential is not + depending on the location. + + """ + + _so_name = "ReactionMethods::WidomInsertion" + _so_creation_policy = "LOCAL" + + def required_keys(self): + return {"kT", "seed"} + + def valid_keys(self): + return {"kT", "seed"} + + def add_reaction(self, **kwargs): + kwargs['gamma'] = 1. + super().add_reaction(**kwargs) + + def calculate_particle_insertion_potential_energy(self, **kwargs): + """ + Measures the potential energy when particles are inserted in the + system following the reaction provided ``reaction_id``. Please + define the insertion moves first by calling the method + :meth:`~ReactionAlgorithm.add_reaction` (with only product types + specified). + + Note that although this function does not provide directly + the chemical potential, it can be used to calculate it. + For an example of such an application please check + :file:`/samples/widom_insertion.py`. + """ + # make inverse widom scheme (deletion of particles) inaccessible. + # The deletion reactions are the odd reaction_ids + + return self.call_method( + "calculate_particle_insertion_potential_energy", **kwargs) + + def calculate_excess_chemical_potential( + self, **kwargs): + """ + Given a set of samples of the particle insertion potential energy, + calculates the excess chemical potential and its statistical error. + + Parameters + ---------- + particle_insertion_potential_energy_samples : array_like of :obj:`float` + Samples of the particle insertion potential energy. + N_blocks : :obj:`int`, optional + Number of bins for binning analysis. + + Returns + ------- + mean : :obj:`float` + Mean excess chemical potential. + error : :obj:`float` + Standard error of the mean. + """ + + def do_block_analysis(samples, N_blocks): + """ + Performs a binning analysis of samples. 
+ Divides the samples in ``N_blocks`` equispaced blocks + and returns the mean and its uncertainty + """ + size_block = int(len(samples) / N_blocks) + block_list = [] + for block in range(N_blocks): + block_list.append( + np.mean(samples[block * size_block:(block + 1) * size_block])) + + sample_mean = np.mean(block_list) + sample_std = np.std(block_list, ddof=1) + sample_uncertainty = sample_std / np.sqrt(N_blocks) + + return sample_mean, sample_uncertainty + + kT = self.kT + + gamma_samples = np.exp(-1.0 * np.array( + kwargs["particle_insertion_potential_energy_samples"]) / kT) + + gamma_mean, gamma_std = do_block_analysis( + samples=gamma_samples, N_blocks=kwargs.get("N_blocks", 16)) + + mu_ex_mean = -kT * np.log(gamma_mean) + + # full propagation of error + mu_ex_Delta = 0.5 * kT * abs(-np.log(gamma_mean + gamma_std) - + (-np.log(gamma_mean - gamma_std))) + + return mu_ex_mean, mu_ex_Delta diff --git a/src/python/espressomd/reaction_ensemble.pyx b/src/python/espressomd/reaction_ensemble.pyx deleted file mode 100644 index 246e6eba45a..00000000000 --- a/src/python/espressomd/reaction_ensemble.pyx +++ /dev/null @@ -1,686 +0,0 @@ -# Copyright (C) 2010-2019 The ESPResSo project -# -# This file is part of ESPResSo. -# -# ESPResSo is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ESPResSo is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -include "myconfig.pxi" -from libcpp.vector cimport vector -from libcpp.memory cimport unique_ptr -from cython.operator cimport dereference as deref -import numpy as np -import warnings - - -cdef class ReactionAlgorithm: - """ - - This class provides the base class for Reaction Algorithms like - the Reaction Ensemble algorithm and the constant pH method. - Initialize the reaction algorithm by setting the - standard pressure, temperature, and the exclusion radius. - - Note: When creating particles the velocities of the new particles are set - according the Maxwell-Boltzmann distribution. In this step the mass of the - new particle is assumed to equal 1. - - - Parameters - ---------- - kT : :obj:`float` - Thermal energy of the system in simulation units - exclusion_radius : :obj:`float` - Minimal distance from any particle, within which new particle will not - be inserted. This is useful to avoid integrator failures if particles - are too close and there is a diverging repulsive interaction, or to - prevent two oppositely charged particles from being placed on top of - each other. The Boltzmann factor :math:`\\exp(-\\beta E)` gives these - configurations a small contribution to the partition function, - therefore they can be neglected. - seed : :obj:`int` - Initial counter value (or seed) of the Mersenne Twister RNG. 
- """ - cdef object _params - cdef CReactionAlgorithm * RE - - def _valid_keys(self): - return "kT", "exclusion_radius", "seed" - - def _required_keys(self): - return "kT", "exclusion_radius", "seed" - - def _set_params_in_es_core(self): - deref(self.RE).kT = self._params["kT"] - # setting a volume is a side effect, sets the default volume of the - # reaction ensemble as the volume of the cuboid simulation box. this - # volume can be altered by the command "reaction ensemble volume - # " if one wants to simulate e.g. in a system with constraint - # (e.g. cuboid box with cylinder constraint, so that the particles are - # only contained in the volume of the cylinder) - if deref(self.RE).volume < 0: - deref(self.RE).set_cuboid_reaction_ensemble_volume() - deref(self.RE).exclusion_radius = self._params["exclusion_radius"] - - def remove_constraint(self): - """ - Remove any previously defined constraint. - Requires setting the volume using :meth:`set_volume`. - - """ - deref(self.RE).remove_constraint() - - def set_cylindrical_constraint_in_z_direction(self, center_x, center_y, - radius_of_cylinder): - """ - Constrain the reaction moves within a cylinder defined by its axis - passing through centres (:math:`x` and :math:`y`) and the radius. - Requires setting the volume using :meth:`set_volume`. - - Parameters - ---------- - center_x : :obj:`float` - x coordinate of center of the cylinder. - center_y : :obj:`float` - y coordinate of center of the cylinder. - radius_of_cylinder : :obj:`float` - radius of the cylinder - - """ - deref(self.RE).set_cyl_constraint( - center_x, center_y, radius_of_cylinder) - - def set_wall_constraints_in_z_direction(self, slab_start_z, slab_end_z): - """ - Restrict the sampling area to a slab in z-direction. Requires setting - the volume using :meth:`set_volume`. This constraint is necessary when - working with :ref:`Electrostatic Layer Correction (ELC)`. - - Parameters - ---------- - slab_start_z : :obj:`float` - z coordinate of the bottom wall. - slab_end_z : :obj:`float` - z coordinate of the top wall. - - Examples - -------- - - >>> import espressomd - >>> import espressomd.shapes - >>> import espressomd.electrostatics - >>> import espressomd.reaction_ensemble - >>> import numpy as np - >>> # setup a charged system - >>> box_l = 20 - >>> elc_gap = 10 - >>> system = espressomd.System(box_l=[box_l, box_l, box_l + elc_gap]) - >>> system.time_step = 0.001 - >>> system.cell_system.skin = 0.4 - >>> types = {"HA": 0, "A-": 1, "H+": 2, "wall": 3} - >>> charges = {types["HA"]: 0, types["A-"]: -1, types["H+"]: +1} - >>> for i in range(10): - ... system.part.add(pos=np.random.random(3) * box_l, type=types["A-"], q=charges[types["A-"]]) - ... system.part.add(pos=np.random.random(3) * box_l, type=types["H+"], q=charges[types["H+"]]) - >>> for particle_type in charges.keys(): - ... system.non_bonded_inter[particle_type, types["wall"]].wca.set_params(epsilon=1.0, sigma=1.0) - >>> # add ELC actor - >>> p3m = espressomd.electrostatics.P3M(prefactor=1.0, accuracy=1e-2) - >>> elc = espressomd.electrostatics.ELC(p3m_actor=p3m, maxPWerror=1.0, gap_size=elc_gap) - >>> system.actors.add(elc) - >>> # add constant pH method - >>> RE = espressomd.reaction_ensemble.ConstantpHEnsemble(kT=1, exclusion_radius=1, seed=77) - >>> RE.constant_pH = 2 - >>> RE.add_reaction(gamma=0.0088, reactant_types=[types["HA"]], - ... product_types=[types["A-"], types["H+"]], - ... 
default_charges=charges) - >>> # add walls for the ELC gap - >>> RE.set_wall_constraints_in_z_direction(0, box_l) - >>> RE.set_volume(box_l**3) - >>> system.constraints.add(shape=espressomd.shapes.Wall(dist=0, normal=[0, 0, 1]), - ... particle_type=types["wall"]) - >>> system.constraints.add(shape=espressomd.shapes.Wall(dist=-box_l, normal=[0, 0, -1]), - ... particle_type=types["wall"]) - - - """ - deref(self.RE).set_slab_constraint(slab_start_z, slab_end_z) - - def get_wall_constraints_in_z_direction(self): - """ - Returns the restrictions of the sampling area in z-direction. - - """ - v = deref(self.RE).get_slab_constraint_parameters() - return [v[0], v[1]] - - def set_volume(self, volume): - """ - Set the volume to be used in the acceptance probability of the reaction - ensemble. This can be useful when using constraints, if the relevant - volume is different from the box volume. If not used the default volume - which is used, is the box volume. - - """ - deref(self.RE).volume = volume - - def get_volume(self): - """ - Get the volume to be used in the acceptance probability of the reaction - ensemble. - - """ - return deref(self.RE).volume - - def get_acceptance_rate_configurational_moves(self): - """ - Returns the acceptance rate for the configuration moves. - - """ - return deref(self.RE).get_acceptance_rate_configurational_moves() - - def get_acceptance_rate_reaction(self, reaction_id): - """ - Returns the acceptance rate for the given reaction. - - """ - return deref(self.RE).reactions[reaction_id].get_acceptance_rate() - - def set_non_interacting_type(self, non_interacting_type): - """ - Sets the particle type for non-interacting particles. - Default value: 100. - This is used to temporarily hide particles during a reaction trial - move, if they are to be deleted after the move is accepted. Please - change this value if you intend to use the type 100 for some other - particle types with interactions, or if you need improved performance, - as the default value of 100 causes some overhead. - Please also note that particles - in the current implementation of the Reaction Ensemble are only - hidden with respect to Lennard-Jones and Coulomb interactions. Hiding - of other interactions, for example a magnetic, needs to be implemented - in the code. - """ - deref(self.RE).non_interacting_type = non_interacting_type - - def get_non_interacting_type(self): - """ - Returns the type which is used for hiding particles. - - """ - return deref(self.RE).non_interacting_type - - def add_reaction(self, *args, **kwargs): - """ - Sets up a reaction in the forward and backward direction. - - Parameters - ---------- - gamma : :obj:`float` - Equilibrium constant of the reaction, :math:`\\gamma` (see the User - guide, section 6.6 for the definition and further details). - reactant_types : list of :obj:`int` - List of particle types of reactants in the reaction. - reactant_coefficients : list of :obj:`int` - List of stoichiometric coefficients of the reactants in the same - order as the list of their types. - product_types : list of :obj:`int` - List of particle types of products in the reaction. - product_coefficients : list of :obj:`int` - List of stoichiometric coefficients of products of the reaction in - the same order as the list of their types - default_charges : :obj:`dict` - A dictionary of default charges for types that occur in the provided reaction. - check_for_electroneutrality : :obj:`bool` - Check for electroneutrality of the given reaction if ``True``. 
- - """ - self._params["check_for_electroneutrality"] = True - for k in self._required_keys_add(): - if k not in kwargs: - raise ValueError( - f"At least the following keys have to be given as keyword " - f"arguments: {self._required_keys()}, got {kwargs}") - self._params[k] = kwargs[k] - - for k in self._valid_keys_add(): - if k in kwargs: - self._params[k] = kwargs[k] - self._check_lengths_of_arrays() - self._validate_params_default_charge() - self._set_params_in_es_core_add() - - def _valid_keys_add(self): - return ("gamma", "reactant_types", "reactant_coefficients", - "product_types", "product_coefficients", "default_charges", - "check_for_electroneutrality") - - def _required_keys_add(self): - return ["gamma", "reactant_types", "reactant_coefficients", - "product_types", "product_coefficients", "default_charges"] - - def _check_lengths_of_arrays(self): - if(len(self._params["reactant_types"]) != len(self._params["reactant_coefficients"])): - raise ValueError( - "Reactants: Number of types and coefficients have to be equal") - if(len(self._params["product_types"]) != len(self._params["product_coefficients"])): - raise ValueError( - "Products: Number of types and coefficients have to be equal") - - def _set_params_in_es_core_add(self): - cdef vector[int] reactant_types - cdef vector[int] reactant_coefficients - cdef vector[int] product_types - cdef vector[int] product_coefficients - for value in self._params["reactant_types"]: - reactant_types.push_back(value) - for value in self._params["reactant_coefficients"]: - reactant_coefficients.push_back(value) - for value in self._params["product_types"]: - product_types.push_back(value) - for value in self._params["product_coefficients"]: - product_coefficients.push_back(value) - - # forward reaction - deref(self.RE).add_reaction( - self._params["gamma"], reactant_types, reactant_coefficients, product_types, product_coefficients) - # backward reaction - deref(self.RE).add_reaction( - 1.0 / self._params["gamma"], product_types, product_coefficients, reactant_types, reactant_coefficients) - - for key, value in self._params["default_charges"].items(): - deref(self.RE).charges_of_types[int(key)] = value - deref(self.RE).check_reaction_method() - - def _validate_params_default_charge(self): - if not isinstance(self._params["default_charges"], dict): - raise ValueError( - "No dictionary for relation between types and default charges provided.") - # check electroneutrality of the provided reaction - if self._params["check_for_electroneutrality"]: - charges = np.array(list(self._params["default_charges"].values())) - if np.count_nonzero(charges) == 0: - # all particles have zero charge - # no need to check electroneutrality - return - total_charge_change = 0.0 - for i in range(len(self._params["reactant_coefficients"])): - type_here = self._params["reactant_types"][i] - total_charge_change -= self._params["reactant_coefficients"][ - i] * self._params["default_charges"][type_here] - for j in range(len(self._params["product_coefficients"])): - type_here = self._params["product_types"][j] - total_charge_change += self._params["product_coefficients"][ - j] * self._params["default_charges"][type_here] - min_abs_nonzero_charge = np.min( - np.abs(charges[np.nonzero(charges)[0]])) - if abs(total_charge_change) / min_abs_nonzero_charge > 1e-10: - raise ValueError("Reaction system is not charge neutral") - - def reaction(self, reaction_steps=1): - """ - Performs randomly selected reactions. 
- - Parameters - ---------- - reaction_steps : :obj:`int`, optional - The number of reactions to be performed at once, defaults to 1. - - """ - deref(self.RE).do_reaction(int(reaction_steps)) - - def displacement_mc_move_for_particles_of_type(self, type_mc, - particle_number_to_be_changed=1): - """ - Performs a displacement Monte Carlo move for particles of given type. - New positions of the displaced particles are chosen from the whole box - with a uniform probability distribution. If there are multiple types, - that are being moved in a simulation, they should be moved in a random - order to avoid artefacts. - - Parameters - ---------- - type_mc : :obj:`int` - particle type which should be moved - - """ - - deref(self.RE).do_global_mc_move_for_particles_of_type( - type_mc, particle_number_to_be_changed) - - def get_status(self): - """ - Returns the status of the reaction ensemble in a dictionary containing - the used reactions, the used kT and the used exclusion radius. - - """ - deref(self.RE).check_reaction_method() - reactions = [] - for single_reaction_i in range(deref(self.RE).reactions.size()): - reactant_types = [] - for i in range( - deref(self.RE).reactions[single_reaction_i].reactant_types.size()): - reactant_types.append( - deref(self.RE).reactions[single_reaction_i].reactant_types[i]) - reactant_coefficients = [] - for i in range( - deref(self.RE).reactions[single_reaction_i].reactant_types.size()): - reactant_coefficients.append( - deref(self.RE).reactions[single_reaction_i].reactant_coefficients[i]) - - product_types = [] - for i in range( - deref(self.RE).reactions[single_reaction_i].product_types.size()): - product_types.append( - deref(self.RE).reactions[single_reaction_i].product_types[i]) - product_coefficients = [] - for i in range( - deref(self.RE).reactions[single_reaction_i].product_types.size()): - product_coefficients.append( - deref(self.RE).reactions[single_reaction_i].product_coefficients[i]) - reaction = {"reactant_coefficients": reactant_coefficients, - "reactant_types": reactant_types, - "product_types": product_types, - "product_coefficients": product_coefficients, - "reactant_types": reactant_types, - "gamma": deref(self.RE).reactions[single_reaction_i].gamma} - reactions.append(reaction) - - return {"reactions": reactions, "kT": deref( - self.RE).kT, "exclusion_radius": deref(self.RE).exclusion_radius} - - def delete_particle(self, p_id): - """ - Deletes the particle of the given p_id and makes sure that the particle - range has no holes. This function has some restrictions, as e.g. bonds - are not deleted. Therefore only apply this function to simple ions. - - """ - deref(self.RE).delete_particle(p_id) - - def change_reaction_constant(self, reaction_id, gamma): - """ - Changes the reaction constant of a given reaction - (for the forward and backward reaction). - The ``reaction_id`` which is assigned to a reaction - depends on the order in which :meth:`add_reaction` was called. - The 0th reaction has ``reaction_id=0``, the next added - reaction needs to be addressed with ``reaction_id=1``, etc. 
- - Parameters - ---------- - reaction_id : :obj:`int` - reaction_id - gamma : :obj:`float` - new reaction constant - - """ - reaction_id = int(reaction_id) - if(reaction_id > deref(self.RE).reactions.size() / 2 - 1 or reaction_id < 0): - raise ValueError( - "You provided an invalid reaction_id, please provide a valid reaction_id") - # for the forward reaction - deref(self.RE).reactions[2 * reaction_id].gamma = gamma - # for the backward reaction - deref(self.RE).reactions[2 * reaction_id + 1].gamma = 1.0 / gamma - - def delete_reaction(self, reaction_id): - """ - Delete a reaction from the set of used reactions - (the forward and backward reaction). - The ``reaction_id`` which is assigned to a reaction - depends on the order in which :meth:`add_reaction` was called. - The 0th reaction has ``reaction_id=0``, the next added - reaction needs to be addressed with ``reaction_id=1``, etc. - After the deletion of a reaction subsequent reactions - take the ``reaction_id`` of the deleted reaction. - - Parameters - ---------- - reaction_id : :obj:`int` - reaction_id - - """ - reaction_id = int(reaction_id) - if(reaction_id > deref(self.RE).reactions.size() / 2 - 1 or reaction_id < 0): - raise ValueError( - "You provided an invalid reaction_id, please provide a valid reaction_id") - deref(self.RE).delete_reaction(2 * reaction_id + 1) - deref(self.RE).delete_reaction(2 * reaction_id) - -cdef class ReactionEnsemble(ReactionAlgorithm): - """ - This class implements the Reaction Ensemble. - """ - - cdef unique_ptr[CReactionEnsemble] REptr - - def __init__(self, *args, **kwargs): - self._params = {"kT": 1, - "exclusion_radius": 0} - for k in self._required_keys(): - if k not in kwargs: - raise ValueError( - f"At least the following keys have to be given as keyword " - f"arguments: {self._required_keys()}, got {kwargs}") - self._params[k] = kwargs[k] - - self.REptr.reset(new CReactionEnsemble(int(self._params["seed"]))) - self.RE = self.REptr.get() - - for k in kwargs: - if k in self._valid_keys(): - self._params[k] = kwargs[k] - else: - raise KeyError(f"{k} is not a valid key") - - self._set_params_in_es_core() - -cdef class ConstantpHEnsemble(ReactionAlgorithm): - """ - This class implements the constant pH Ensemble. - - When adding an acid-base reaction, the acid and base particle types - are always assumed to be at index 0 of the lists passed to arguments - ``reactant_types`` and ``product_types``. - - """ - cdef unique_ptr[CConstantpHEnsemble] constpHptr - - def __init__(self, *args, **kwargs): - self._params = {"kT": 1, "exclusion_radius": 0} - for k in self._required_keys(): - if k not in kwargs: - raise ValueError( - f"At least the following keys have to be given as keyword " - f"arguments: {self._required_keys()}, got {kwargs}") - self._params[k] = kwargs[k] - - self.constpHptr.reset(new CConstantpHEnsemble(int(self._params["seed"]))) - self.RE = self.constpHptr.get() - - for k in kwargs: - if k in self._valid_keys(): - self._params[k] = kwargs[k] - else: - raise KeyError(f"{k} is not a valid key") - - self._set_params_in_es_core() - - def add_reaction(self, *args, **kwargs): - warn_msg = ( - "arguments 'reactant_coefficients' and 'product_coefficients' " - "are deprecated and are no longer necessary for the constant pH " - "ensemble. 
They are kept for backward compatibility but might " - "be deleted in future versions.") - err_msg = ("All product and reactant coefficients must equal one in " - "the constant pH method as implemented in ESPResSo.") - warn_user = False - - if "reactant_coefficients" in kwargs: - if kwargs["reactant_coefficients"][0] != 1: - raise ValueError(err_msg) - else: - warn_user = True - else: - kwargs["reactant_coefficients"] = [1] - - if "product_coefficients" in kwargs: - if kwargs["product_coefficients"][0] != 1 or kwargs["product_coefficients"][1] != 1: - raise ValueError(err_msg) - else: - warn_user = True - else: - kwargs["product_coefficients"] = [1, 1] - - if warn_user: - warnings.warn(warn_msg, FutureWarning) - - if(len(kwargs["product_types"]) != 2 or len(kwargs["reactant_types"]) != 1): - raise ValueError( - "The constant pH method is only implemented for reactions " - "with two product types and one adduct type.") - - super().add_reaction(*args, **kwargs) - - property constant_pH: - """ - Sets the input pH for the constant pH ensemble method. - - """ - - def __set__(self, double pH): - """ - Sets the pH that the method assumes for the implicit pH bath. - - """ - - deref(self.constpHptr).m_constant_pH = pH - -cdef class WidomInsertion(ReactionAlgorithm): - """ - This class implements the Widom insertion method in the canonical ensemble - for homogeneous systems, where the excess chemical potential is not - depending on the location. - - """ - - cdef unique_ptr[CWidomInsertion] WidomInsertionPtr - - def _required_keys(self): - return ("kT", "seed") - - def _valid_keys(self): - return ("kT", "seed") - - def _valid_keys_add(self): - return ("reactant_types", "reactant_coefficients", "product_types", - "product_coefficients", "default_charges", - "check_for_electroneutrality") - - def _required_keys_add(self): - return ("reactant_types", "reactant_coefficients", - "product_types", "product_coefficients", "default_charges") - - def __init__(self, *args, **kwargs): - self._params = {"kT": 1} - for k in self._required_keys(): - if k not in kwargs: - raise ValueError( - f"At least the following keys have to be given as keyword " - f"arguments: {self._required_keys()}, got {kwargs}") - self._params[k] = kwargs[k] - self._params["exclusion_radius"] = 0.0 # not used by this method - self._params["gamma"] = 1.0 # not used by this method - - self.WidomInsertionPtr.reset(new CWidomInsertion(int(self._params["seed"]))) - self.RE = self.WidomInsertionPtr.get() - for k in kwargs: - if k in self._valid_keys(): - self._params[k] = kwargs[k] - else: - raise KeyError(f"{k} is not a valid key") - - self._set_params_in_es_core() - - def calculate_particle_insertion_potential_energy(self, reaction_id=0): - """ - Measures the potential energy when particles are inserted in the - system following the reaction provided ``reaction_id``. Please - define the insertion moves first by calling the method - :meth:`~ReactionAlgorithm.add_reaction` (with only product types - specified). - - Note that although this function does not provide directly - the chemical potential, it can be used to calculate it. - For an example of such an application please check - :file:`/samples/widom_insertion.py`. - """ - # make inverse widom scheme (deletion of particles) inaccessible. 
- # The deletion reactions are the odd reaction_ids - if(reaction_id < 0 or reaction_id > (deref(self.WidomInsertionPtr).reactions.size() + 1) / 2): - raise ValueError("This reaction is not present") - return deref(self.WidomInsertionPtr).calculate_particle_insertion_potential_energy( - deref(self.WidomInsertionPtr).reactions[int(2 * reaction_id)]) - - def calculate_excess_chemical_potential( - self, particle_insertion_potential_energy_samples, N_blocks=16): - """ - Given a set of samples of the particle insertion potential energy, - calculates the excess chemical potential and its statistical error. - - Parameters - ---------- - particle_insertion_potential_energy_samples : array_like of :obj:`float` - Samples of the particle insertion potential energy. - N_blocks : :obj:`int`, optional - Number of bins for binning analysis. - - Returns - ------- - mean : :obj:`float` - Mean excess chemical potential. - error : :obj:`float` - Standard error of the mean. - """ - - def do_block_analysis(samples, N_blocks=16): - """ - Performs a binning analysis of samples. - Divides the samples in ``N_blocks`` equispaced blocks - and returns the mean and its uncertainty - """ - size_block = int(len(samples) / N_blocks) - block_list = [] - for block in range(N_blocks): - block_list.append( - np.mean(samples[block * size_block:(block + 1) * size_block])) - - sample_mean = np.mean(block_list) - sample_std = np.std(block_list, ddof=1) - sample_uncertainty = sample_std / np.sqrt(N_blocks) - - return sample_mean, sample_uncertainty - - gamma_samples = np.exp(-1.0 * np.array( - particle_insertion_potential_energy_samples) / self._params["kT"]) - - gamma_mean, gamma_std = do_block_analysis( - samples=gamma_samples, N_blocks=N_blocks) - - mu_ex_mean = -1 * np.log(gamma_mean) * self._params["kT"] - - # full propagation of error - mu_ex_Delta = 0.5 * self._params["kT"] * abs(-np.log(gamma_mean + gamma_std) - - (-np.log(gamma_mean - gamma_std))) - - return mu_ex_mean, mu_ex_Delta diff --git a/src/python/espressomd/script_interface.pyx b/src/python/espressomd/script_interface.pyx index 94546abe719..531639c8f1c 100644 --- a/src/python/espressomd/script_interface.pyx +++ b/src/python/espressomd/script_interface.pyx @@ -15,8 +15,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
import numpy as np -from .utils import to_char_pointer, to_str, handle_errors, is_valid_type -from .utils cimport Vector3d, make_array_locked +from .utils import to_char_pointer, to_str, handle_errors, array_locked, is_valid_type +from .utils cimport Vector2d, Vector3d, Vector4d, make_array_locked cimport cpython.object from libcpp.memory cimport make_shared @@ -229,6 +229,8 @@ cdef variant_to_python_object(const Variant & value) except +: cdef vector[Variant] vec cdef unordered_map[int, Variant] vmap cdef shared_ptr[ObjectHandle] ptr + cdef Vector2d vec2d + cdef Vector4d vec4d if is_none(value): return None if is_type[bool](value): @@ -243,8 +245,14 @@ cdef variant_to_python_object(const Variant & value) except +: return get_value[vector[int]](value) if is_type[vector[double]](value): return get_value[vector[double]](value) + if is_type[Vector4d](value): + vec4d = get_value[Vector4d](value) + return array_locked([vec4d[0], vec4d[1], vec4d[2], vec4d[3]]) if is_type[Vector3d](value): return make_array_locked(get_value[Vector3d](value)) + if is_type[Vector2d](value): + vec2d = get_value[Vector2d](value) + return array_locked([vec2d[0], vec2d[1]]) if is_type[shared_ptr[ObjectHandle]](value): # Get the id and build a corresponding object ptr = get_value[shared_ptr[ObjectHandle]](value) diff --git a/src/python/espressomd/utils.pyx b/src/python/espressomd/utils.pyx index c2d9860c644..7b465b771a1 100644 --- a/src/python/espressomd/utils.pyx +++ b/src/python/espressomd/utils.pyx @@ -315,3 +315,21 @@ def requires_experimental_features(reason): ELSE: # Return original class return lambda x: x + + +def check_required_keys(required_keys, obtained_keys): + a = required_keys + b = obtained_keys + if not set(a).issubset(b): + raise ValueError( + "The following keys have to be given as keyword arguments: " + f"{sorted(a)}, got {sorted(b)} (missing {sorted(a - b)})") + + +def check_valid_keys(valid_keys, obtained_keys): + a = valid_keys + b = obtained_keys + if not set(b).issubset(a): + raise ValueError( + "Only the following keys can be given as keyword arguments: " + f"{sorted(a)}, got {sorted(b)} (unknown {sorted(b - a)})") diff --git a/src/script_interface/CMakeLists.txt b/src/script_interface/CMakeLists.txt index 1a7751e3194..cc2a9e1373d 100644 --- a/src/script_interface/CMakeLists.txt +++ b/src/script_interface/CMakeLists.txt @@ -16,6 +16,7 @@ add_subdirectory(pair_criteria) add_subdirectory(mpiio) add_subdirectory(shapes) add_subdirectory(h5md) +add_subdirectory(reaction_methods) install(TARGETS ScriptInterface LIBRARY DESTINATION ${PYTHON_INSTDIR}/espressomd) diff --git a/src/script_interface/GlobalContext.hpp b/src/script_interface/GlobalContext.hpp index 44f09222046..5dd712f83e5 100644 --- a/src/script_interface/GlobalContext.hpp +++ b/src/script_interface/GlobalContext.hpp @@ -161,7 +161,7 @@ class GlobalContext : public Context { boost::string_ref name(const ObjectHandle *o) const override; - bool is_head_node() const override { return m_is_head_node; }; + bool is_head_node() const override { return m_is_head_node; } }; } // namespace ScriptInterface diff --git a/src/script_interface/LocalContext.hpp b/src/script_interface/LocalContext.hpp index 64b4fad1f96..e057c26d0f2 100644 --- a/src/script_interface/LocalContext.hpp +++ b/src/script_interface/LocalContext.hpp @@ -67,7 +67,7 @@ class LocalContext : public Context { return factory().type_name(*o); } - bool is_head_node() const override { return m_is_head_node; }; + bool is_head_node() const override { return m_is_head_node; } }; } // 
namespace ScriptInterface diff --git a/src/script_interface/ObjectList.hpp b/src/script_interface/ObjectList.hpp index 19b57e892fe..b4196679bdf 100644 --- a/src/script_interface/ObjectList.hpp +++ b/src/script_interface/ObjectList.hpp @@ -31,6 +31,7 @@ #include #include #include +#include #include namespace ScriptInterface { @@ -108,7 +109,6 @@ class ObjectList : public BaseType { if (method == "get_elements") { std::vector ret; ret.reserve(m_elements.size()); - for (auto const &e : m_elements) ret.emplace_back(e); diff --git a/src/script_interface/cluster_analysis/ClusterStructure.hpp b/src/script_interface/cluster_analysis/ClusterStructure.hpp index 29097355d61..f9ad0e4b8ef 100644 --- a/src/script_interface/cluster_analysis/ClusterStructure.hpp +++ b/src/script_interface/cluster_analysis/ClusterStructure.hpp @@ -45,10 +45,10 @@ class ClusterStructure : public AutoParameters { get_value>(value); if (m_pc) { m_cluster_structure.set_pair_criterion(m_pc->pair_criterion()); - }; + } }, [this]() { return m_pc; }}}); - }; + } Variant do_call_method(std::string const &method, VariantMap const ¶meters) override { if (method == "get_cluster") { diff --git a/src/script_interface/collision_detection/CollisionDetection.hpp b/src/script_interface/collision_detection/CollisionDetection.hpp index 4b4cf8d040f..cc336b717d4 100644 --- a/src/script_interface/collision_detection/CollisionDetection.hpp +++ b/src/script_interface/collision_detection/CollisionDetection.hpp @@ -24,12 +24,13 @@ #include "config.hpp" -#include "core/collision.hpp" +#ifdef COLLISION_DETECTION + #include "script_interface/ScriptInterface.hpp" -#include +#include "core/collision.hpp" -#ifdef COLLISION_DETECTION +#include namespace ScriptInterface { namespace CollisionDetection { @@ -63,9 +64,9 @@ class CollisionDetection : public AutoParameters { const VariantMap ¶ms) override { if (name == "validate") { return validate_collision_parameters(); - }; + } return none; - }; + } }; } /* namespace CollisionDetection */ diff --git a/src/script_interface/constraints/Constraints.hpp b/src/script_interface/constraints/Constraints.hpp index cc3cb1b678d..4cf0ba24083 100644 --- a/src/script_interface/constraints/Constraints.hpp +++ b/src/script_interface/constraints/Constraints.hpp @@ -37,7 +37,7 @@ class Constraints : public ObjectList { } void remove_in_core(std::shared_ptr const &obj_ptr) override { ::Constraints::constraints.remove(obj_ptr->constraint()); - }; + } }; } /* namespace Constraints */ } /* namespace ScriptInterface */ diff --git a/src/script_interface/constraints/ShapeBasedConstraint.hpp b/src/script_interface/constraints/ShapeBasedConstraint.hpp index 0e28ba38745..49793a599de 100644 --- a/src/script_interface/constraints/ShapeBasedConstraint.hpp +++ b/src/script_interface/constraints/ShapeBasedConstraint.hpp @@ -53,7 +53,7 @@ class ShapeBasedConstraint : public Constraint { get_value>(value); if (m_shape) { m_constraint->set_shape(m_shape->shape()); - }; + } }, [this]() { return m_shape; }}, {"particle_velocity", m_constraint->velocity()}}); diff --git a/src/script_interface/initialize.cpp b/src/script_interface/initialize.cpp index 9664399afb6..2a55eb1e528 100644 --- a/src/script_interface/initialize.cpp +++ b/src/script_interface/initialize.cpp @@ -37,6 +37,7 @@ #include "lbboundaries/initialize.hpp" #include "mpiio/initialize.hpp" #include "observables/initialize.hpp" +#include "reaction_methods/initialize.hpp" #include "virtual_sites/initialize.hpp" namespace ScriptInterface { @@ -56,6 +57,7 @@ void initialize(Utils::Factory 
*f) { VirtualSites::initialize(f); MPIIO::initialize(f); CollisionDetection::initialize(f); + ReactionMethods::initialize(f); f->register_new("ComFixed"); f->register_new( diff --git a/src/script_interface/interactions/bonded.hpp b/src/script_interface/interactions/bonded.hpp index 60004a4997b..df53b89b338 100644 --- a/src/script_interface/interactions/bonded.hpp +++ b/src/script_interface/interactions/bonded.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef _SCRIPT_INTERFACE_INTERACTIONS_BONDED_HPP -#define _SCRIPT_INTERFACE_INTERACTIONS_BONDED_HPP +#ifndef SCRIPT_INTERFACE_INTERACTIONS_BONDED_HPP +#define SCRIPT_INTERFACE_INTERACTIONS_BONDED_HPP /** @file * Functions to interface with the core boost::variant. diff --git a/src/script_interface/lbboundaries/LBBoundary.hpp b/src/script_interface/lbboundaries/LBBoundary.hpp index 01a740b20c9..fdd53e6360f 100644 --- a/src/script_interface/lbboundaries/LBBoundary.hpp +++ b/src/script_interface/lbboundaries/LBBoundary.hpp @@ -21,12 +21,13 @@ #include "config.hpp" -#include "core/grid_based_algorithms/lb_interface.hpp" -#include "core/grid_based_algorithms/lbboundaries/LBBoundary.hpp" #include "script_interface/ScriptInterface.hpp" #include "script_interface/auto_parameters/AutoParameters.hpp" #include "script_interface/shapes/Shape.hpp" +#include "core/grid_based_algorithms/lb_interface.hpp" +#include "core/grid_based_algorithms/lbboundaries/LBBoundary.hpp" + #include #include @@ -47,7 +48,7 @@ class LBBoundary : public AutoParameters { if (m_shape) { m_lbboundary->set_shape(m_shape->shape()); - }; + } }, [this]() { return m_shape; }}}); #ifdef EK_BOUNDARIES @@ -89,7 +90,8 @@ class LBBoundary : public AutoParameters { /* Keep a reference to the shape */ std::shared_ptr m_shape; -}; // class LBBoundary +}; + } // namespace LBBoundaries } /* namespace ScriptInterface */ #endif diff --git a/src/script_interface/mpiio/initialize.cpp b/src/script_interface/mpiio/initialize.cpp index 95f18615f4f..954e2dba466 100644 --- a/src/script_interface/mpiio/initialize.cpp +++ b/src/script_interface/mpiio/initialize.cpp @@ -18,7 +18,7 @@ */ #include "initialize.hpp" -#include "si_mpiio.hpp" +#include "mpiio.hpp" namespace ScriptInterface { namespace MPIIO { diff --git a/src/script_interface/mpiio/si_mpiio.hpp b/src/script_interface/mpiio/mpiio.hpp similarity index 70% rename from src/script_interface/mpiio/si_mpiio.hpp rename to src/script_interface/mpiio/mpiio.hpp index 966bd10c1d6..72ab1dbd056 100644 --- a/src/script_interface/mpiio/si_mpiio.hpp +++ b/src/script_interface/mpiio/mpiio.hpp @@ -19,19 +19,17 @@ * along with this program. If not, see . */ -#ifndef ESPRESSO_SCRIPTINTERFACE_MPIIO_HPP -#define ESPRESSO_SCRIPTINTERFACE_MPIIO_HPP +#ifndef ESPRESSO_SCRIPT_INTERFACE_MPIIO_MPIIO_HPP +#define ESPRESSO_SCRIPT_INTERFACE_MPIIO_MPIIO_HPP -#include "config.hpp" -#include "io/mpiio/mpiio.hpp" #include "script_interface/ScriptInterface.hpp" #include "script_interface/auto_parameters/AutoParameters.hpp" #include "script_interface/get_value.hpp" -#include -#include +#include "core/cells.hpp" +#include "core/io/mpiio/mpiio.hpp" -#define field_value(use, v) ((use) ? 
(v) : 0u) +#include namespace ScriptInterface { namespace MPIIO { @@ -43,22 +41,22 @@ class MPIIOScript : public AutoParameters { Variant do_call_method(const std::string &name, const VariantMap ¶meters) override { - auto pref = get_value(parameters.at("prefix")); + auto prefix = get_value(parameters.at("prefix")); auto pos = get_value(parameters.at("pos")); auto vel = get_value(parameters.at("vel")); auto typ = get_value(parameters.at("typ")); - auto bond = get_value(parameters.at("bond")); + auto bnd = get_value(parameters.at("bond")); - unsigned v = field_value(pos, Mpiio::MPIIO_OUT_POS) | - field_value(vel, Mpiio::MPIIO_OUT_VEL) | - field_value(typ, Mpiio::MPIIO_OUT_TYP) | - field_value(bond, Mpiio::MPIIO_OUT_BND); + auto const fields = ((pos) ? Mpiio::MPIIO_OUT_POS : Mpiio::MPIIO_OUT_NON) | + ((vel) ? Mpiio::MPIIO_OUT_VEL : Mpiio::MPIIO_OUT_NON) | + ((typ) ? Mpiio::MPIIO_OUT_TYP : Mpiio::MPIIO_OUT_NON) | + ((bnd) ? Mpiio::MPIIO_OUT_BND : Mpiio::MPIIO_OUT_NON); if (name == "write") - Mpiio::mpi_mpiio_common_write(pref.c_str(), v, + Mpiio::mpi_mpiio_common_write(prefix, fields, cell_structure.local_particles()); else if (name == "read") - Mpiio::mpi_mpiio_common_read(pref.c_str(), v); + Mpiio::mpi_mpiio_common_read(prefix, fields); return {}; } diff --git a/src/script_interface/reaction_methods/CMakeLists.txt b/src/script_interface/reaction_methods/CMakeLists.txt new file mode 100644 index 00000000000..43c571ff9a2 --- /dev/null +++ b/src/script_interface/reaction_methods/CMakeLists.txt @@ -0,0 +1,2 @@ +target_sources(ScriptInterface + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/initialize.cpp) diff --git a/src/script_interface/reaction_methods/ConstantpHEnsemble.hpp b/src/script_interface/reaction_methods/ConstantpHEnsemble.hpp new file mode 100644 index 00000000000..5016c8e7504 --- /dev/null +++ b/src/script_interface/reaction_methods/ConstantpHEnsemble.hpp @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
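The check_required_keys() and check_valid_keys() helpers added to utils.pyx above expect set-like arguments (their error messages use set difference). A short usage sketch, assuming the helpers are exposed as plain module-level functions of espressomd.utils; the key sets mirror the reaction-method parameters introduced in this patch:

from espressomd.utils import check_required_keys, check_valid_keys

valid_keys = {"kT", "seed", "exclusion_radius", "constant_pH"}
required_keys = {"kT", "seed", "exclusion_radius"}
kwargs = {"kT": 1.0, "seed": 42}

check_valid_keys(valid_keys, set(kwargs))        # passes: no unknown keys
check_required_keys(required_keys, set(kwargs))  # raises ValueError: 'exclusion_radius' missing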
+ */ + +#ifndef SCRIPT_INTERFACE_REACTION_METHODS_CONSTANT_PH_HPP +#define SCRIPT_INTERFACE_REACTION_METHODS_CONSTANT_PH_HPP + +#include "ReactionAlgorithm.hpp" + +#include "script_interface/ScriptInterface.hpp" + +#include "core/reaction_methods/ConstantpHEnsemble.hpp" +#include "core/reaction_methods/ReactionAlgorithm.hpp" + +#include + +namespace ScriptInterface { +namespace ReactionMethods { + +class ConstantpHEnsemble : public ReactionAlgorithm { +public: + std::shared_ptr<::ReactionMethods::ReactionAlgorithm> RE() override { + return m_re; + } + + ConstantpHEnsemble() { + add_parameters({ + {"constant_pH", + [this](Variant const &v) { + m_re->m_constant_pH = get_value(v); + }, + [this]() { return m_re->m_constant_pH; }}, + }); + } + + void do_construct(VariantMap const ¶ms) override { + m_re = std::make_shared<::ReactionMethods::ConstantpHEnsemble>( + get_value(params, "seed"), get_value(params, "kT"), + get_value(params, "exclusion_radius"), + get_value(params, "constant_pH")); + } + +private: + std::shared_ptr<::ReactionMethods::ConstantpHEnsemble> m_re; +}; +} /* namespace ReactionMethods */ +} /* namespace ScriptInterface */ + +#endif \ No newline at end of file diff --git a/src/script_interface/reaction_methods/ReactionAlgorithm.hpp b/src/script_interface/reaction_methods/ReactionAlgorithm.hpp new file mode 100644 index 00000000000..f6028b70e09 --- /dev/null +++ b/src/script_interface/reaction_methods/ReactionAlgorithm.hpp @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef SCRIPT_INTERFACE_REACTION_METHODS_REACTION_ALGORITHM_HPP +#define SCRIPT_INTERFACE_REACTION_METHODS_REACTION_ALGORITHM_HPP + +#include "SingleReaction.hpp" + +#include "script_interface/ScriptInterface.hpp" + +#include "core/reaction_methods/ReactionAlgorithm.hpp" + +#include +#include +#include +#include + +namespace ScriptInterface { +namespace ReactionMethods { + +class ReactionAlgorithm : public AutoParameters { +protected: + /** Keep track of the script interface pointer of each reaction. */ + std::vector> m_reactions; + /** + * Check reaction id is within the reaction container bounds. + * Since each reaction has a corresponding backward reaction, + * the total number of reactions is doubled. Return the + * corresponding index for @ref ReactionAlgorithm::m_reactions. 
+ */ + int get_reaction_index(int reaction_id) const { + auto const index = 2 * reaction_id; + if (index < 0 or index >= static_cast(m_reactions.size())) { + throw std::out_of_range("This reaction is not present"); + } + return index; + } + +public: + virtual std::shared_ptr<::ReactionMethods::ReactionAlgorithm> RE() = 0; + + ReactionAlgorithm() { + add_parameters({ + {"reactions", AutoParameter::read_only, + [this]() { + std::vector out; + for (auto const &e : m_reactions) { + out.emplace_back(e); + } + return out; + }}, + {"kT", AutoParameter::read_only, [this]() { return RE()->get_kT(); }}, + {"exclusion_radius", AutoParameter::read_only, + [this]() { return RE()->get_exclusion_radius(); }}, + }); + } + + Variant do_call_method(std::string const &name, + VariantMap const ¶meters) override { + if (name == "remove_constraint") { + RE()->remove_constraint(); + } else if (name == "set_cylindrical_constraint_in_z_direction") { + RE()->set_cyl_constraint(get_value(parameters, "center_x"), + get_value(parameters, "center_y"), + get_value(parameters, "radius")); + } else if (name == "set_wall_constraints_in_z_direction") { + RE()->set_slab_constraint(get_value(parameters, "slab_start_z"), + get_value(parameters, "slab_end_z")); + } else if (name == "get_wall_constraints_in_z_direction") { + return RE()->get_slab_constraint_parameters(); + } else if (name == "set_volume") { + RE()->set_volume(get_value(parameters, "volume")); + } else if (name == "get_volume") { + return RE()->get_volume(); + } else if (name == "get_acceptance_rate_reaction") { + auto const index = get_value(parameters, "reaction_id"); + if (index < 0 or index >= static_cast(m_reactions.size())) { + throw std::out_of_range("This reaction is not present"); + } + return m_reactions[index]->get_reaction()->get_acceptance_rate(); + } else if (name == "set_non_interacting_type") { + RE()->non_interacting_type = get_value(parameters, "type"); + } else if (name == "get_non_interacting_type") { + return RE()->non_interacting_type; + } else if (name == "reaction") { + RE()->do_reaction(get_value_or(parameters, "reaction_steps", 1)); + } else if (name == "displacement_mc_move_for_particles_of_type") { + RE()->do_global_mc_move_for_particles_of_type( + get_value(parameters, "type_mc"), + get_value_or(parameters, "particle_number_to_be_changed", 1)); + } else if (name == "check_reaction_method") { + RE()->check_reaction_method(); + } else if (name == "delete_particle") { + RE()->delete_particle(get_value(parameters, "p_id")); + } else if (name == "delete_reaction") { + auto const reaction_id = get_value(parameters, "reaction_id"); + auto const index = get_reaction_index(reaction_id); + // delete forward and backward reactions + delete_reaction(index + 1); + delete_reaction(index + 0); + } else if (name == "add_reaction") { + auto const reaction = + get_value>(parameters, "reaction"); + m_reactions.push_back(reaction); + RE()->add_reaction(reaction->get_reaction()); + } else if (name == "change_reaction_constant") { + auto const gamma = get_value(parameters, "gamma"); + auto const reaction_id = get_value(parameters, "reaction_id"); + auto const index = get_reaction_index(reaction_id); + m_reactions[index + 0]->get_reaction()->gamma = gamma; + m_reactions[index + 1]->get_reaction()->gamma = 1. 
/ gamma; + } else if (name == "set_charge_of_type") { + auto const type = get_value(parameters, "type"); + auto const charge = get_value(parameters, "charge"); + RE()->charges_of_types[type] = charge; + } else { + throw std::runtime_error(("unknown method '" + name + "()'").c_str()); + } + return {}; + }; + +private: + void delete_reaction(int reaction_id) { + m_reactions.erase(m_reactions.begin() + reaction_id); + RE()->delete_reaction(reaction_id); + } +}; +} /* namespace ReactionMethods */ +} /* namespace ScriptInterface */ + +#endif \ No newline at end of file diff --git a/src/script_interface/reaction_methods/ReactionEnsemble.hpp b/src/script_interface/reaction_methods/ReactionEnsemble.hpp new file mode 100644 index 00000000000..cdd98e1aaa6 --- /dev/null +++ b/src/script_interface/reaction_methods/ReactionEnsemble.hpp @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef SCRIPT_INTERFACE_REACTION_METHODS_REACTION_ENSEMBLE_HPP +#define SCRIPT_INTERFACE_REACTION_METHODS_REACTION_ENSEMBLE_HPP + +#include "ReactionAlgorithm.hpp" + +#include "script_interface/ScriptInterface.hpp" + +#include "core/reaction_methods/ReactionAlgorithm.hpp" +#include "core/reaction_methods/ReactionEnsemble.hpp" + +#include + +namespace ScriptInterface { +namespace ReactionMethods { + +class ReactionEnsemble : public ReactionAlgorithm { +public: + std::shared_ptr<::ReactionMethods::ReactionAlgorithm> RE() override { + return m_re; + } + + void do_construct(VariantMap const ¶ms) override { + m_re = std::make_shared<::ReactionMethods::ReactionEnsemble>( + get_value(params, "seed"), get_value(params, "kT"), + get_value(params, "exclusion_radius")); + } + +private: + std::shared_ptr<::ReactionMethods::ReactionEnsemble> m_re; +}; +} /* namespace ReactionMethods */ +} /* namespace ScriptInterface */ + +#endif \ No newline at end of file diff --git a/src/script_interface/reaction_methods/SingleReaction.hpp b/src/script_interface/reaction_methods/SingleReaction.hpp new file mode 100644 index 00000000000..dfd0c86b19f --- /dev/null +++ b/src/script_interface/reaction_methods/SingleReaction.hpp @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
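The index arithmetic in get_reaction_index() keeps the convention of the removed Python code: every user-visible reaction_id owns a pair of entries, the forward reaction at 2*id and its backward counterpart at 2*id + 1, which is why delete_reaction() removes index + 1 before index and change_reaction_constant() assigns gamma and 1/gamma to the pair. A small illustration of that bookkeeping (plain Python, with a hypothetical list standing in for m_reactions):

reactions = []  # stands in for m_reactions: forward/backward pairs

def add_reaction_pair(forward, backward):
    reactions.extend([forward, backward])

def get_reaction_index(reaction_id):
    index = 2 * reaction_id
    if index < 0 or index >= len(reactions):
        raise IndexError("This reaction is not present")
    return index

def delete_reaction(reaction_id):
    index = get_reaction_index(reaction_id)
    # remove the backward reaction first so the forward index stays valid
    del reactions[index + 1]
    del reactions[index]

add_reaction_pair("HA -> A- + H+", "A- + H+ -> HA")
add_reaction_pair("B -> C", "C -> B")
delete_reaction(0)
assert reactions == ["B -> C", "C -> B"]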
+ */ + +#ifndef SCRIPT_INTERFACE_REACTION_METHODS_SINGLE_REACTION_HPP +#define SCRIPT_INTERFACE_REACTION_METHODS_SINGLE_REACTION_HPP + +#include "core/reaction_methods/SingleReaction.hpp" +#include "script_interface/ScriptInterface.hpp" +#include +#include + +namespace ScriptInterface { +namespace ReactionMethods { + +class SingleReaction : public AutoParameters { +public: + SingleReaction() { + add_parameters({ + {"gamma", AutoParameter::read_only, [this]() { return m_sr->gamma; }}, + {"reactant_types", AutoParameter::read_only, + [this]() { return m_sr->reactant_types; }}, + {"reactant_coefficients", AutoParameter::read_only, + [this]() { return m_sr->reactant_coefficients; }}, + {"product_types", AutoParameter::read_only, + [this]() { return m_sr->product_types; }}, + {"product_coefficients", AutoParameter::read_only, + [this]() { return m_sr->product_coefficients; }}, + }); + } + + void do_construct(VariantMap const ¶ms) override { + m_sr = std::make_shared<::ReactionMethods::SingleReaction>( + get_value(params, "gamma"), + get_value>(params, "reactant_types"), + get_value>(params, "reactant_coefficients"), + get_value>(params, "product_types"), + get_value>(params, "product_coefficients")); + } + + std::shared_ptr<::ReactionMethods::SingleReaction> get_reaction() { + return m_sr; + } + +private: + std::shared_ptr<::ReactionMethods::SingleReaction> m_sr; +}; +} /* namespace ReactionMethods */ +} /* namespace ScriptInterface */ + +#endif \ No newline at end of file diff --git a/src/script_interface/reaction_methods/WidomInsertion.hpp b/src/script_interface/reaction_methods/WidomInsertion.hpp new file mode 100644 index 00000000000..f60045507df --- /dev/null +++ b/src/script_interface/reaction_methods/WidomInsertion.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef SCRIPT_INTERFACE_REACTION_METHODS_WIDOM_INSERTION_HPP +#define SCRIPT_INTERFACE_REACTION_METHODS_WIDOM_INSERTION_HPP + +#include "ReactionAlgorithm.hpp" + +#include "script_interface/ScriptInterface.hpp" + +#include "core/reaction_methods/ReactionAlgorithm.hpp" +#include "core/reaction_methods/WidomInsertion.hpp" + +#include +#include +#include + +namespace ScriptInterface { +namespace ReactionMethods { + +class WidomInsertion : public ReactionAlgorithm { +public: + std::shared_ptr<::ReactionMethods::ReactionAlgorithm> RE() override { + return m_re; + } + + void do_construct(VariantMap const ¶ms) override { + m_re = std::make_shared<::ReactionMethods::WidomInsertion>( + get_value(params, "seed"), get_value(params, "kT"), 0.); + } + + Variant do_call_method(std::string const &name, + VariantMap const ¶meters) override { + if (name == "calculate_particle_insertion_potential_energy") { + auto const reaction_id = get_value(parameters, "reaction_id"); + auto const index = get_reaction_index(reaction_id); + auto &reaction = *m_reactions[index]->get_reaction(); + return m_re->calculate_particle_insertion_potential_energy(reaction); + } + return ReactionAlgorithm::do_call_method(name, parameters); + } + +private: + std::shared_ptr<::ReactionMethods::WidomInsertion> m_re; +}; +} /* namespace ReactionMethods */ +} /* namespace ScriptInterface */ + +#endif \ No newline at end of file diff --git a/src/script_interface/reaction_methods/initialize.cpp b/src/script_interface/reaction_methods/initialize.cpp new file mode 100644 index 00000000000..eb01924dc19 --- /dev/null +++ b/src/script_interface/reaction_methods/initialize.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "initialize.hpp" + +#include "SingleReaction.hpp" + +#include "ConstantpHEnsemble.hpp" +#include "ReactionEnsemble.hpp" +#include "WidomInsertion.hpp" + +#include "script_interface/ScriptInterface.hpp" + +namespace ScriptInterface { +namespace ReactionMethods { +void initialize(Utils::Factory *om) { + om->register_new("ReactionMethods::SingleReaction"); + om->register_new("ReactionMethods::WidomInsertion"); + om->register_new("ReactionMethods::ReactionEnsemble"); + om->register_new("ReactionMethods::ConstantpHEnsemble"); +} +} // namespace ReactionMethods +} // namespace ScriptInterface diff --git a/src/script_interface/reaction_methods/initialize.hpp b/src/script_interface/reaction_methods/initialize.hpp new file mode 100644 index 00000000000..5890b3411c3 --- /dev/null +++ b/src/script_interface/reaction_methods/initialize.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2021 The ESPResSo project + * + * This file is part of ESPResSo. 
+ * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef SCRIPT_INTERFACE_REACTION_METHODS_INITIALIZE_HPP +#define SCRIPT_INTERFACE_REACTION_METHODS_INITIALIZE_HPP + +#include +#include + +namespace ScriptInterface { +namespace ReactionMethods { +void initialize(Utils::Factory *om); +} // namespace ReactionMethods +} // namespace ScriptInterface + +#endif diff --git a/src/script_interface/tests/CMakeLists.txt b/src/script_interface/tests/CMakeLists.txt index bdbf22b8c6f..540f0bb0013 100644 --- a/src/script_interface/tests/CMakeLists.txt +++ b/src/script_interface/tests/CMakeLists.txt @@ -37,6 +37,9 @@ unit_test(NAME packed_variant_test SRC packed_variant_test.cpp DEPENDS ScriptInterface) unit_test(NAME ObjectList_test SRC ObjectList_test.cpp DEPENDS ScriptInterface) unit_test(NAME ObjectMap_test SRC ObjectMap_test.cpp DEPENDS ScriptInterface) +unit_test(NAME serialization_mpi_guard_test SRC + serialization_mpi_guard_test.cpp DEPENDS ScriptInterface Boost::mpi + MPI::MPI_CXX NUM_PROC 2) unit_test(NAME Accumulators_test SRC Accumulators_test.cpp DEPENDS ScriptInterface) unit_test(NAME Constraints_test SRC Constraints_test.cpp DEPENDS diff --git a/src/script_interface/tests/ObjectHandle_test.cpp b/src/script_interface/tests/ObjectHandle_test.cpp index 65237f500ad..7ed7b1aa6b6 100644 --- a/src/script_interface/tests/ObjectHandle_test.cpp +++ b/src/script_interface/tests/ObjectHandle_test.cpp @@ -23,10 +23,15 @@ #define BOOST_TEST_DYN_LINK #include -#include - +#include "script_interface/ObjectHandle.hpp" +#include "script_interface/ObjectState.hpp" #include "script_interface/ScriptInterface.hpp" +#include + +#include +#include + #include #include #include @@ -92,6 +97,30 @@ BOOST_AUTO_TEST_CASE(non_copyable) { BOOST_TEST_PASSPOINT(); } +BOOST_AUTO_TEST_CASE(default_constructible) { + ObjectHandle handle; + + auto const param_name = std::string("unknown"); + handle.construct({{param_name, Variant{1}}}); + BOOST_CHECK(is_type(handle.get_parameter(param_name))); + BOOST_CHECK(is_type(handle.call_method("foo", {}))); + + // serialization should be empty + auto const bytestring_obj = handle.serialize(); + auto const bytestring_ref = Utils::pack(ObjectState{}); + BOOST_CHECK_EQUAL(bytestring_obj, bytestring_ref); + + // serialization of an empty ObjectState should only contain the library + // version and a string "serialization::archive", followed by a few integers + auto const trim_null_terminator_right = [](std::string const &s) { + return boost::trim_right_copy_if(s, [](char const c) { return c == '\0'; }); + }; + auto const bytestring_nul = Utils::pack(std::string{}); + auto const metadata_obj = trim_null_terminator_right(bytestring_obj); + auto const metadata_ref = trim_null_terminator_right(bytestring_nul); + BOOST_CHECK_EQUAL(metadata_obj, metadata_ref); +} + /* * Check that the call to ObjectHandle::construct is * forwarded correctly to the implementation. 
@@ -164,7 +193,7 @@ struct LogContext : public Context { return "Dummy"; } - bool is_head_node() const override { return true; }; + bool is_head_node() const override { return true; } }; } // namespace Testing diff --git a/src/script_interface/tests/serialization_mpi_guard_test.cpp b/src/script_interface/tests/serialization_mpi_guard_test.cpp new file mode 100644 index 00000000000..88fea91e851 --- /dev/null +++ b/src/script_interface/tests/serialization_mpi_guard_test.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2022 The ESPResSo project + * + * This file is part of ESPResSo. + * + * ESPResSo is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * ESPResSo is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#define BOOST_TEST_NO_MAIN +#define BOOST_TEST_MODULE Object container MPI guard test +#define BOOST_TEST_DYN_LINK +#include + +#include "script_interface/ObjectList.hpp" + +#include + +#include +#include +#include +#include +#include + +using ScriptInterface::ObjectHandle; + +namespace Testing { +struct ObjectContainer : ScriptInterface::ObjectList { + std::vector> objects; + +private: + void add_in_core(std::shared_ptr const &obj_ptr) override { + objects.push_back(obj_ptr); + } + void remove_in_core(std::shared_ptr const &obj_ptr) override { + objects.erase(std::remove(objects.begin(), objects.end(), obj_ptr), + objects.end()); + } +}; +} // namespace Testing + +BOOST_AUTO_TEST_CASE(parallel_exception) { + boost::mpi::communicator world; + auto const obj_ptr = std::make_shared(); + auto const predicate = [](std::exception const &ex) { + std::string message = + "Non-empty object containers do not support checkpointing " + "in MPI environments. 
Container contains 1 elements."; + return ex.what() == message; + }; + + Testing::ObjectContainer list; + BOOST_CHECK_NO_THROW(list.serialize()); + + list.add(obj_ptr); + if (world.size() > 1) { + BOOST_CHECK_EXCEPTION(list.serialize(), std::runtime_error, predicate); + } + + list.remove(obj_ptr); + BOOST_CHECK_NO_THROW(list.serialize()); +} + +int main(int argc, char **argv) { + boost::mpi::environment mpi_env(argc, argv); + + return boost::unit_test::unit_test_main(init_unit_test, argc, argv); +} diff --git a/src/script_interface/virtual_sites/ActiveVirtualSitesHandle.hpp b/src/script_interface/virtual_sites/ActiveVirtualSitesHandle.hpp index b105fb95ff4..38d0a2251ca 100644 --- a/src/script_interface/virtual_sites/ActiveVirtualSitesHandle.hpp +++ b/src/script_interface/virtual_sites/ActiveVirtualSitesHandle.hpp @@ -22,14 +22,18 @@ #ifndef SCRIPT_INTERFACE_VIRTUAL_SITES_ACTIVE_VIRTUAL_SITES_HANDLE_HPP #define SCRIPT_INTERFACE_VIRTUAL_SITES_ACTIVE_VIRTUAL_SITES_HANDLE_HPP -#include "VirtualSites.hpp" #include "config.hpp" -#include "core/virtual_sites.hpp" -#include "errorhandling.hpp" -#include "script_interface/auto_parameters/AutoParameters.hpp" #ifdef VIRTUAL_SITES +#include "VirtualSites.hpp" +#include "script_interface/auto_parameters/AutoParameters.hpp" + +#include "core/errorhandling.hpp" +#include "core/virtual_sites.hpp" + +#include + namespace ScriptInterface { namespace VirtualSites { @@ -52,5 +56,5 @@ class ActiveVirtualSitesHandle }; } /* namespace VirtualSites */ } /* namespace ScriptInterface */ -#endif +#endif // VIRTUAL_SITES #endif diff --git a/src/script_interface/virtual_sites/VirtualSites.hpp b/src/script_interface/virtual_sites/VirtualSites.hpp index 26196daaf15..3d5ae5a6cde 100644 --- a/src/script_interface/virtual_sites/VirtualSites.hpp +++ b/src/script_interface/virtual_sites/VirtualSites.hpp @@ -23,15 +23,18 @@ #define SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_HPP #include "config.hpp" -#include "core/virtual_sites.hpp" + +#ifdef VIRTUAL_SITES + #include "script_interface/auto_parameters/AutoParameters.hpp" +#include "core/virtual_sites.hpp" + #include namespace ScriptInterface { namespace VirtualSites { -#ifdef VIRTUAL_SITES class VirtualSites : public AutoParameters { public: VirtualSites() { @@ -40,14 +43,13 @@ class VirtualSites : public AutoParameters { [this](const Variant &v) { virtual_sites()->set_have_quaternion(get_value(v)); }, - [this]() { return virtual_sites()->get_have_quaternion(); }}}); + [this]() { return virtual_sites()->have_quaternions(); }}}); } /** Vs implementation we are wrapping */ virtual std::shared_ptr<::VirtualSites> virtual_sites() = 0; }; -#endif - } /* namespace VirtualSites */ } /* namespace ScriptInterface */ +#endif // VIRTUAL_SITES #endif diff --git a/src/script_interface/virtual_sites/VirtualSitesInertialessTracers.hpp b/src/script_interface/virtual_sites/VirtualSitesInertialessTracers.hpp index 0b473d32843..1cc84c5365c 100644 --- a/src/script_interface/virtual_sites/VirtualSitesInertialessTracers.hpp +++ b/src/script_interface/virtual_sites/VirtualSitesInertialessTracers.hpp @@ -22,22 +22,27 @@ #ifndef SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_INERTIALESS_TRACERS_HPP #define SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_INERTIALESS_TRACERS_HPP -#include "VirtualSites.hpp" #include "config.hpp" -#include "core/virtual_sites/VirtualSitesInertialessTracers.hpp" + #ifdef VIRTUAL_SITES_INERTIALESS_TRACERS +#include "VirtualSites.hpp" + +#include "core/virtual_sites/VirtualSitesInertialessTracers.hpp" + +#include + namespace 
ScriptInterface { namespace VirtualSites { class VirtualSitesInertialessTracers : public VirtualSites { public: VirtualSitesInertialessTracers() - : m_virtual_sites(new ::VirtualSitesInertialessTracers()){}; + : m_virtual_sites(new ::VirtualSitesInertialessTracers()) {} /** Vs implementation we are wrapping */ std::shared_ptr<::VirtualSites> virtual_sites() override { return m_virtual_sites; - }; + } private: std::shared_ptr<::VirtualSitesInertialessTracers> m_virtual_sites; @@ -45,5 +50,5 @@ class VirtualSitesInertialessTracers : public VirtualSites { } /* namespace VirtualSites */ } /* namespace ScriptInterface */ -#endif +#endif // VIRTUAL_SITES_INERTIALESS_TRACERS #endif diff --git a/src/script_interface/virtual_sites/VirtualSitesOff.hpp b/src/script_interface/virtual_sites/VirtualSitesOff.hpp index 605d5360674..185a7506213 100644 --- a/src/script_interface/virtual_sites/VirtualSitesOff.hpp +++ b/src/script_interface/virtual_sites/VirtualSitesOff.hpp @@ -22,21 +22,26 @@ #ifndef SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_OFF_HPP #define SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_OFF_HPP -#include "VirtualSites.hpp" #include "config.hpp" -#include "core/virtual_sites/VirtualSitesOff.hpp" + #ifdef VIRTUAL_SITES +#include "VirtualSites.hpp" + +#include "core/virtual_sites/VirtualSitesOff.hpp" + +#include + namespace ScriptInterface { namespace VirtualSites { class VirtualSitesOff : public VirtualSites { public: - VirtualSitesOff() : m_virtual_sites(new ::VirtualSitesOff()){}; + VirtualSitesOff() : m_virtual_sites(new ::VirtualSitesOff()) {} /** Vs implementation we are wrapping */ std::shared_ptr<::VirtualSites> virtual_sites() override { return m_virtual_sites; - }; + } private: std::shared_ptr<::VirtualSitesOff> m_virtual_sites; @@ -44,5 +49,5 @@ class VirtualSitesOff : public VirtualSites { } /* namespace VirtualSites */ } /* namespace ScriptInterface */ -#endif +#endif // VIRTUAL_SITES #endif diff --git a/src/script_interface/virtual_sites/VirtualSitesRelative.hpp b/src/script_interface/virtual_sites/VirtualSitesRelative.hpp index 56ec0ca372a..362bb94f0b5 100644 --- a/src/script_interface/virtual_sites/VirtualSitesRelative.hpp +++ b/src/script_interface/virtual_sites/VirtualSitesRelative.hpp @@ -22,21 +22,26 @@ #ifndef SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_RELATIVE_HPP #define SCRIPT_INTERFACE_VIRTUAL_SITES_VIRTUAL_SITES_RELATIVE_HPP -#include "VirtualSites.hpp" #include "config.hpp" -#include "core/virtual_sites/VirtualSitesRelative.hpp" #ifdef VIRTUAL_SITES_RELATIVE + +#include "VirtualSites.hpp" + +#include "core/virtual_sites/VirtualSitesRelative.hpp" + +#include + namespace ScriptInterface { namespace VirtualSites { class VirtualSitesRelative : public VirtualSites { public: - VirtualSitesRelative() : m_virtual_sites(new ::VirtualSitesRelative()){}; + VirtualSitesRelative() : m_virtual_sites(new ::VirtualSitesRelative()) {} /** Vs implementation we are wrapping */ std::shared_ptr<::VirtualSites> virtual_sites() override { return m_virtual_sites; - }; + } private: std::shared_ptr<::VirtualSitesRelative> m_virtual_sites; @@ -44,5 +49,5 @@ class VirtualSitesRelative : public VirtualSites { } /* namespace VirtualSites */ } /* namespace ScriptInterface */ -#endif +#endif // VIRTUAL_SITES_RELATIVE #endif diff --git a/src/shapes/include/shapes/Cylinder.hpp b/src/shapes/include/shapes/Cylinder.hpp index 7873a7bce07..a6867cbea52 100644 --- a/src/shapes/include/shapes/Cylinder.hpp +++ b/src/shapes/include/shapes/Cylinder.hpp @@ -19,8 +19,8 @@ * along with this program. 
If not, see . */ -#ifndef __CYLINDER_HPP -#define __CYLINDER_HPP +#ifndef SRC_SHAPES_CYLINDER_HPP +#define SRC_SHAPES_CYLINDER_HPP #include "Shape.hpp" diff --git a/src/shapes/include/shapes/Rhomboid.hpp b/src/shapes/include/shapes/Rhomboid.hpp index 1136ec1825d..14e7e7a6046 100644 --- a/src/shapes/include/shapes/Rhomboid.hpp +++ b/src/shapes/include/shapes/Rhomboid.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . */ -#ifndef __RHOMBOID_HPP -#define __RHOMBOID_HPP +#ifndef SRC_SHAPES_RHOMBOID_HPP +#define SRC_SHAPES_RHOMBOID_HPP #include "Shape.hpp" #include diff --git a/src/shapes/include/shapes/Slitpore.hpp b/src/shapes/include/shapes/Slitpore.hpp index 5a0c61403e1..1309ee1ef55 100644 --- a/src/shapes/include/shapes/Slitpore.hpp +++ b/src/shapes/include/shapes/Slitpore.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . */ -#ifndef __SLITPORE_HPP -#define __SLITPORE_HPP +#ifndef SRC_SHAPES_SLITPORE_HPP +#define SRC_SHAPES_SLITPORE_HPP #include "Shape.hpp" diff --git a/src/shapes/include/shapes/Sphere.hpp b/src/shapes/include/shapes/Sphere.hpp index 313ea9ba76b..9eacfdc6b31 100644 --- a/src/shapes/include/shapes/Sphere.hpp +++ b/src/shapes/include/shapes/Sphere.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . */ -#ifndef __SPHERE_HPP -#define __SPHERE_HPP +#ifndef SRC_SHAPES_SPHERE_HPP +#define SRC_SHAPES_SPHERE_HPP #include "Shape.hpp" #include diff --git a/src/shapes/include/shapes/SpheroCylinder.hpp b/src/shapes/include/shapes/SpheroCylinder.hpp index 5d38139f999..93beccb8d95 100644 --- a/src/shapes/include/shapes/SpheroCylinder.hpp +++ b/src/shapes/include/shapes/SpheroCylinder.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . */ -#ifndef __SPHEROCYLINDER_HPP -#define __SPHEROCYLINDER_HPP +#ifndef SRC_SHAPES_SPHEROCYLINDER_HPP +#define SRC_SHAPES_SPHEROCYLINDER_HPP #include "Shape.hpp" #include diff --git a/src/shapes/include/shapes/Torus.hpp b/src/shapes/include/shapes/Torus.hpp index 8b972b61fc8..20adc4dc71a 100644 --- a/src/shapes/include/shapes/Torus.hpp +++ b/src/shapes/include/shapes/Torus.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#ifndef __TORUS_HPP -#define __TORUS_HPP +#ifndef SRC_SHAPES_TORUS_HPP +#define SRC_SHAPES_TORUS_HPP #include "Shape.hpp" #include diff --git a/src/shapes/src/Rhomboid.cpp b/src/shapes/src/Rhomboid.cpp index a2f1c90901a..902ca2416ac 100644 --- a/src/shapes/src/Rhomboid.cpp +++ b/src/shapes/src/Rhomboid.cpp @@ -184,7 +184,7 @@ void Rhomboid::calculate_dist(const Utils::Vector3d &pos, double &dist, [=, &vec, &dist](auto op1, auto op2, Utils::Vector3d const &distance, Utils::Vector3d const &axis, double const dir_dot_axis, int sign) { - auto d = (distance)*axis; + auto d = distance * axis; if (op1(dir_dot_axis, 0)) { d *= -1; } diff --git a/src/shapes/unit_tests/Sphere_test.cpp b/src/shapes/unit_tests/Sphere_test.cpp index f3e92d1710b..31ebbe798bf 100644 --- a/src/shapes/unit_tests/Sphere_test.cpp +++ b/src/shapes/unit_tests/Sphere_test.cpp @@ -37,13 +37,15 @@ void check_distance_function(Shapes::Sphere &s) { auto const tol = std::numeric_limits::epsilon() * 100; s.rad() = 1.0; - pos = {0., 0., 0.}; s.calculate_dist(pos, dist, vec); - double always_pos_dist = -s.direction() * dist; - BOOST_REQUIRE_GE(always_pos_dist, 0.0); - BOOST_REQUIRE_CLOSE(always_pos_dist, s.rad(), tol); - BOOST_REQUIRE_CLOSE(always_pos_dist, vec.norm(), tol); + + { + double always_pos_dist = -s.direction() * dist; + BOOST_REQUIRE_GE(always_pos_dist, 0.0); + BOOST_REQUIRE_CLOSE(always_pos_dist, s.rad(), tol); + BOOST_REQUIRE_CLOSE(always_pos_dist, vec.norm(), tol); + } for (int i = 0; i < 3; ++i) { pos[i] = 1.0; diff --git a/src/utils/include/utils/Array.hpp b/src/utils/include/utils/Array.hpp index 7f27eb1c07c..9369ecc584e 100644 --- a/src/utils/include/utils/Array.hpp +++ b/src/utils/include/utils/Array.hpp @@ -16,8 +16,14 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#ifndef UTILS_ARRAY_HPP -#define UTILS_ARRAY_HPP +#ifndef SRC_UTILS_INCLUDE_UTILS_ARRAY_HPP +#define SRC_UTILS_INCLUDE_UTILS_ARRAY_HPP + +/** + * @file + * + * @brief Array implementation with CUDA support. 
+ */ #include "device_qualifier.hpp" #include "get.hpp" @@ -29,8 +35,8 @@ #include #include #include -namespace Utils { +namespace Utils { namespace detail { template struct Storage { @@ -50,7 +56,7 @@ struct ArrayFormatterStream { std::ostream &stream; char const *separator; ArrayFormatterStream(std::ostream &s, char const *sep) - : stream(s), separator(sep){}; + : stream(s), separator(sep) {} }; struct ArrayFormatter { @@ -120,27 +126,27 @@ template struct Array { DEVICE_QUALIFIER constexpr iterator begin() noexcept { return &m_storage.m_data[0]; - }; + } DEVICE_QUALIFIER constexpr const_iterator begin() const noexcept { return &m_storage.m_data[0]; - }; + } DEVICE_QUALIFIER constexpr const_iterator cbegin() const noexcept { return &m_storage.m_data[0]; - }; + } DEVICE_QUALIFIER constexpr iterator end() noexcept { return &m_storage.m_data[N]; - }; + } DEVICE_QUALIFIER constexpr const_iterator end() const noexcept { return &m_storage.m_data[N]; - }; + } DEVICE_QUALIFIER constexpr const_iterator cend() const noexcept { return &m_storage.m_data[N]; - }; + } DEVICE_QUALIFIER constexpr bool empty() const noexcept { return size() == 0; } @@ -149,14 +155,15 @@ template struct Array { DEVICE_QUALIFIER constexpr size_type max_size() const noexcept { return N; } DEVICE_QUALIFIER void fill(const value_type &value) { - for (size_type i = 0; i < size(); ++i) + for (size_type i = 0; i != size(); ++i) { m_storage.m_data[i] = value; + } } DEVICE_QUALIFIER static constexpr Array broadcast(const value_type &value) { Array ret{}; - for (size_type i = 0; i < N; ++i) { + for (size_type i = 0; i != N; ++i) { ret[i] = value; } return ret; @@ -208,4 +215,4 @@ auto get(Array const &a) -> std::enable_if_t<(I < N), const T &> { } } // namespace Utils -#endif +#endif // SRC_UTILS_INCLUDE_UTILS_ARRAY_HPP diff --git a/src/utils/include/utils/Counter.hpp b/src/utils/include/utils/Counter.hpp index 2812b8d17fd..8a265416a64 100644 --- a/src/utils/include/utils/Counter.hpp +++ b/src/utils/include/utils/Counter.hpp @@ -27,8 +27,7 @@ template class Counter { T m_val; T m_initial; friend class boost::serialization::access; - template - void serialize(Archive &ar, const unsigned int version) { + template void serialize(Archive &ar, const unsigned int) { ar &m_val; ar &m_initial; } diff --git a/src/utils/include/utils/Vector.hpp b/src/utils/include/utils/Vector.hpp index 3d8db6e977c..ab3d5741cd2 100644 --- a/src/utils/include/utils/Vector.hpp +++ b/src/utils/include/utils/Vector.hpp @@ -16,9 +16,15 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#ifndef SRC_UTILS_INCLUDE_UTILS_VECTOR_HPP +#define SRC_UTILS_INCLUDE_UTILS_VECTOR_HPP -#ifndef VECTOR_HPP -#define VECTOR_HPP +/** + * @file + * + * @brief Vector implementation and trait types + * for boost qvm interoperability. + */ #include #include @@ -500,4 +506,4 @@ template struct deduce_vec, 3> { } // namespace qvm } // namespace boost -#endif +#endif // SRC_UTILS_INCLUDE_UTILS_VECTOR_HPP diff --git a/src/utils/include/utils/as_const.hpp b/src/utils/include/utils/as_const.hpp index ea59bef2999..9e1f1f7068d 100644 --- a/src/utils/include/utils/as_const.hpp +++ b/src/utils/include/utils/as_const.hpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#ifndef CORE_UTILS_AS_CONST_HPP -#define CORE_UTILS_AS_CONST_HPP +#ifndef UTILS_AS_CONST_HPP +#define UTILS_AS_CONST_HPP #include diff --git a/src/utils/include/utils/interpolation/bspline_3d.hpp b/src/utils/include/utils/interpolation/bspline_3d.hpp index 244a356bbf8..01becffd000 100644 --- a/src/utils/include/utils/interpolation/bspline_3d.hpp +++ b/src/utils/include/utils/interpolation/bspline_3d.hpp @@ -50,8 +50,8 @@ void bspline_3d(const Vector3d &pos, const Kernel &kernel, const auto block = detail::ll_and_dist(pos, grid_spacing, offset); /* Precalc weights that are used multiple times. */ - std::array w_y; - std::array w_z; + std::array w_y{}; + std::array w_z{}; for (int i = 0; i < order; i++) { w_y[i] = bspline(i, block.distance[1]); w_z[i] = bspline(i, block.distance[2]); diff --git a/src/utils/include/utils/math/int_pow.hpp b/src/utils/include/utils/math/int_pow.hpp index 79c679ebb3c..7bfebad9e47 100644 --- a/src/utils/include/utils/math/int_pow.hpp +++ b/src/utils/include/utils/math/int_pow.hpp @@ -47,7 +47,7 @@ template struct int_pow_impl { }; template struct int_pow_impl { - DEVICE_QUALIFIER constexpr T operator()(T x) const { return T{1}; } + DEVICE_QUALIFIER constexpr T operator()(T) const { return T{1}; } }; } // namespace detail diff --git a/src/utils/include/utils/math/matrix_vector_product.hpp b/src/utils/include/utils/math/matrix_vector_product.hpp index 1e40c321c56..fc23d044f15 100644 --- a/src/utils/include/utils/math/matrix_vector_product.hpp +++ b/src/utils/include/utils/math/matrix_vector_product.hpp @@ -34,7 +34,7 @@ template struct mul { }; template struct mul<0, T> { - constexpr T operator()(const T a) const { return T{}; } + constexpr T operator()(const T) const { return T{}; } }; template struct mul<1, T> { diff --git a/src/utils/include/utils/math/quaternion.hpp b/src/utils/include/utils/math/quaternion.hpp index 53c830d6453..bef1962dd97 100644 --- a/src/utils/include/utils/math/quaternion.hpp +++ b/src/utils/include/utils/math/quaternion.hpp @@ -57,7 +57,7 @@ Quaternion convert_director_to_quaternion(Vector const &d) { // null vectors cannot be converted to quaternions if (dm < std::numeric_limits::epsilon()) { - return {1, 0, 0, 0}; + return {{{{1, 0, 0, 0}}}}; } // Calculate angles @@ -83,8 +83,8 @@ Quaternion convert_director_to_quaternion(Vector const &d) { auto const sin_theta2 = std::sin(theta2); auto const cos_phi2 = std::cos(phi2); auto const sin_phi2 = std::sin(phi2); - return {cos_theta2 * cos_phi2, -sin_theta2 * cos_phi2, -sin_theta2 * sin_phi2, - cos_theta2 * sin_phi2}; + return {{{{cos_theta2 * cos_phi2, -sin_theta2 * cos_phi2, + -sin_theta2 * sin_phi2, cos_theta2 * sin_phi2}}}}; } } // namespace Utils diff --git a/src/utils/include/utils/matrix.hpp b/src/utils/include/utils/matrix.hpp index 1e5656c0e82..dd838bac2b5 100644 --- a/src/utils/include/utils/matrix.hpp +++ b/src/utils/include/utils/matrix.hpp @@ -19,6 +19,13 @@ #ifndef SRC_UTILS_INCLUDE_UTILS_MATRIX_HPP #define SRC_UTILS_INCLUDE_UTILS_MATRIX_HPP +/** + * @file + * + * @brief Matrix implementation and trait types + * for boost qvm interoperability. + */ + #include "utils/Array.hpp" #include "utils/Vector.hpp" #include "utils/flatten.hpp" @@ -47,13 +54,6 @@ #include #include -/** - * @file matrix.hpp - * - * @brief This file contains a matrix implementation and the trait types needed - * for the boost qvm interoperability. 
- */ - namespace Utils { /** @@ -74,12 +74,13 @@ template struct Matrix { container m_data; +private: friend class boost::serialization::access; - template - void serialize(Archive &ar, const unsigned int version) { + template void serialize(Archive &ar, const unsigned int) { ar &m_data; } +public: Matrix() = default; Matrix(std::initializer_list init_list) { assert(init_list.size() == Rows * Cols); @@ -127,24 +128,24 @@ template struct Matrix { * @brief Iterator access (non const). * @return Returns an iterator to the first element of the matrix. */ - constexpr iterator begin() noexcept { return m_data.begin(); }; + constexpr iterator begin() noexcept { return m_data.begin(); } /** * @brief Iterator access (const). * @return Returns an iterator to the first element of the matrix. */ - constexpr const_iterator begin() const noexcept { return m_data.begin(); }; + constexpr const_iterator begin() const noexcept { return m_data.begin(); } /** * @brief Iterator access (non const). * @return Returns an iterator to the element following the last element of * the matrix. */ - constexpr iterator end() noexcept { return m_data.end(); }; + constexpr iterator end() noexcept { return m_data.end(); } /** * @brief Iterator access (non const). * @return Returns an iterator to the element following the last element of * the matrix. */ - constexpr const_iterator end() const noexcept { return m_data.end(); }; + constexpr const_iterator end() const noexcept { return m_data.end(); } /** * @brief Retrieve an entire matrix row. * @tparam R The row index. @@ -300,5 +301,4 @@ struct deduce_mat2, Utils::Matrix, 3, 3> { } // namespace qvm } // namespace boost - -#endif +#endif // SRC_UTILS_INCLUDE_UTILS_MATRIX_HPP diff --git a/src/utils/include/utils/mpi/cart_comm.hpp b/src/utils/include/utils/mpi/cart_comm.hpp index 0005f8b7678..d5882eeab4d 100644 --- a/src/utils/include/utils/mpi/cart_comm.hpp +++ b/src/utils/include/utils/mpi/cart_comm.hpp @@ -60,7 +60,7 @@ boost::mpi::communicator cart_create( (comm, dim, dims.data(), periodicity.data(), static_cast(reorder), &temp_comm)) - return boost::mpi::communicator(temp_comm, boost::mpi::comm_take_ownership); + return {temp_comm, boost::mpi::comm_take_ownership}; } /** @@ -113,13 +113,12 @@ inline std::pair cart_shift(boost::mpi::communicator const &comm, template Utils::Vector cart_neighbors(const boost::mpi::communicator &comm) { - using std::get; Vector ret; for (std::size_t i = 0; i < dim; i++) { - ret[2 * i + 0] = get<1>(cart_shift(comm, i, -1)); - ret[2 * i + 1] = get<1>(cart_shift(comm, i, +1)); + ret[2 * i + 0] = std::get<1>(cart_shift(comm, static_cast(i), -1)); + ret[2 * i + 1] = std::get<1>(cart_shift(comm, static_cast(i), +1)); } return ret; diff --git a/src/utils/include/utils/quaternion.hpp b/src/utils/include/utils/quaternion.hpp index bd5a9d3ed24..7da863ac63e 100644 --- a/src/utils/include/utils/quaternion.hpp +++ b/src/utils/include/utils/quaternion.hpp @@ -19,6 +19,13 @@ #ifndef SRC_UTILS_INCLUDE_UTILS_QUATERNION_HPP #define SRC_UTILS_INCLUDE_UTILS_QUATERNION_HPP +/** + * @file + * + * @brief Quaternion implementation and trait types + * for boost qvm interoperability. + */ + #include #include #include @@ -37,13 +44,6 @@ #include #include -/** - * @file quaternion.hpp - * - * @brief This file contains a matrix implementation and the trait types needed - * for the boost qvm interoperability. 
- */ - namespace Utils { /** @@ -58,11 +58,13 @@ template struct Quaternion { using value_type = typename container::value_type; using reference = typename container::reference; +private: friend class boost::serialization::access; - template - void serialize(Archive &ar, const unsigned int version) { + template void serialize(Archive &ar, const unsigned int) { ar &m_data; } + +public: /** * @brief Normalize the quaternion in place. */ diff --git a/src/utils/include/utils/sampling.hpp b/src/utils/include/utils/sampling.hpp index 9b581ee69c5..2d9b26a9549 100644 --- a/src/utils/include/utils/sampling.hpp +++ b/src/utils/include/utils/sampling.hpp @@ -49,6 +49,7 @@ std::vector get_cylindrical_sampling_positions( std::pair const &phi_limits, std::pair const &z_limits, std::size_t n_r_bins, std::size_t n_phi_bins, std::size_t n_z_bins, double sampling_density) { + auto constexpr endpoint = false; auto const delta_r = (r_limits.second - r_limits.first) / static_cast(n_r_bins); auto const delta_phi = @@ -58,22 +59,20 @@ std::vector get_cylindrical_sampling_positions( // azimuthal angle per bin such that we fulfill the sampling density // requirement. auto const smallest_bin_volume = - pi() * Utils::sqr(r_limits.first + delta_r) * delta_phi / (2.0 * pi()); + Utils::sqr(r_limits.first + delta_r) * delta_phi / 2.; auto const min_n_samples = std::max(n_z_bins, static_cast(std::round( smallest_bin_volume * sampling_density))); auto const delta_z = (z_limits.second - z_limits.first) / static_cast(min_n_samples); - auto const r_range = - make_lin_space(r_limits.first + .5 * delta_r, r_limits.second, n_r_bins, - /* endpoint */ false); + auto const r_range = make_lin_space(r_limits.first + .5 * delta_r, + r_limits.second, n_r_bins, endpoint); auto const phi_range = make_lin_space(phi_limits.first + .5 * delta_phi, phi_limits.second, - n_phi_bins, /* endpoint */ false); - auto const z_range = - make_lin_space(z_limits.first + .5 * delta_z, z_limits.second, - min_n_samples, /* endpoint */ false); + n_phi_bins, endpoint); + auto const z_range = make_lin_space(z_limits.first + .5 * delta_z, + z_limits.second, min_n_samples, endpoint); // Create the sampling positions for the innermost bin. std::vector sampling_positions; @@ -84,17 +83,10 @@ std::vector get_cylindrical_sampling_positions( } // Scale the number of samples for larger bins - auto arc_length = [delta_phi, delta_r](int r_bin) { - return delta_phi * (r_bin + 1) * delta_r; - }; - auto n_phi_samples = [arc_length](int r_bin) { - return arc_length(r_bin) / arc_length(0); - }; - auto phis = [n_phi_samples, n_phi_bins, phi_limits](int r_bin) { + auto phis = [n_phi_bins, phi_limits](long r_bin) { auto const phis_range = make_lin_space( phi_limits.first, phi_limits.second, - n_phi_bins * static_cast(std::round(n_phi_samples(r_bin))), - /*endpoint */ false); + n_phi_bins * (static_cast(r_bin) + 1), endpoint); return phis_range; }; // Calculate the sampling positions diff --git a/src/utils/include/utils/statistics/RunningAverage.hpp b/src/utils/include/utils/statistics/RunningAverage.hpp index 1e098988d6e..e73a8f526c2 100644 --- a/src/utils/include/utils/statistics/RunningAverage.hpp +++ b/src/utils/include/utils/statistics/RunningAverage.hpp @@ -19,8 +19,8 @@ * along with this program. If not, see . 
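The rewrite of get_cylindrical_sampling_positions() above drops the redundant pi/(2*pi) factor and replaces the arc-length heuristic with a direct rule: the ring at radial bin r_bin receives n_phi_bins * (r_bin + 1) azimuthal samples. A rough NumPy sketch of that layout (the return type and the half-bin azimuthal offset of the innermost ring are simplified, so treat this as an illustration rather than the library routine):

import numpy as np

def lin_space(start, stop, num):
    # stand-in for Utils::make_lin_space(..., endpoint=false)
    return np.linspace(start, stop, num, endpoint=False)

def cylindrical_sampling_positions(r_lim, phi_lim, z_lim, n_r_bins,
                                   n_phi_bins, n_z_bins, sampling_density):
    delta_r = (r_lim[1] - r_lim[0]) / n_r_bins
    delta_phi = (phi_lim[1] - phi_lim[0]) / n_phi_bins
    # axial sample count so the smallest (innermost) bin meets the
    # requested sampling density; pi/(2*pi) cancels to 1/2
    smallest_bin_volume = (r_lim[0] + delta_r) ** 2 * delta_phi / 2.0
    min_n_samples = max(n_z_bins, round(smallest_bin_volume * sampling_density))
    delta_z = (z_lim[1] - z_lim[0]) / min_n_samples

    r_values = lin_space(r_lim[0] + 0.5 * delta_r, r_lim[1], n_r_bins)
    z_values = lin_space(z_lim[0] + 0.5 * delta_z, z_lim[1], min_n_samples)
    positions = []
    for r_bin, r in enumerate(r_values):
        # ring at radial bin r_bin gets n_phi_bins * (r_bin + 1) angles
        for phi in lin_space(phi_lim[0], phi_lim[1], n_phi_bins * (r_bin + 1)):
            positions.extend((r, phi, z) for z in z_values)
    return positions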
*/ -#ifndef __RUNING_AVERAGE_HPP -#define __RUNING_AVERAGE_HPP +#ifndef STATISTICS_RUNING_AVERAGE_HPP +#define STATISTICS_RUNING_AVERAGE_HPP #include #include diff --git a/src/utils/include/utils/tuple.hpp b/src/utils/include/utils/tuple.hpp index a2feb4cc06b..16415f1015f 100644 --- a/src/utils/include/utils/tuple.hpp +++ b/src/utils/include/utils/tuple.hpp @@ -139,7 +139,7 @@ struct filter_impl { } template - constexpr static auto get(Tuple const &t, std::false_type) { + constexpr static auto get(Tuple const &, std::false_type) { return std::make_tuple(); } diff --git a/src/utils/tests/Array_test.cpp b/src/utils/tests/Array_test.cpp index ab517392b50..680fdfe7789 100644 --- a/src/utils/tests/Array_test.cpp +++ b/src/utils/tests/Array_test.cpp @@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(array_ctor) { } BOOST_AUTO_TEST_CASE(iterators) { - auto a = Array{{1, 2, 3, 4}}; + auto a = Array{{{1, 2, 3, 4}}}; BOOST_CHECK_EQUAL(*(a.begin()), 1); BOOST_CHECK_EQUAL(*(a.cbegin()), 1); @@ -62,7 +62,7 @@ BOOST_AUTO_TEST_CASE(iterators) { } BOOST_AUTO_TEST_CASE(element_access) { - auto a = Array{{5, 6, 7, 8, 9}}; + auto a = Array{{{5, 6, 7, 8, 9}}}; auto const &b = a; int c = 5; @@ -124,12 +124,12 @@ BOOST_AUTO_TEST_CASE(tuple_protocol) { static_assert(std::is_same, int>::value, ""); static_assert(A{}.size() == Utils::tuple_size::value, ""); - BOOST_CHECK_EQUAL(Utils::get<1>(A{1, 2, 3, 4}), 2); + BOOST_CHECK_EQUAL(Utils::get<1>(A{{{1, 2, 3, 4}}}), 2); } BOOST_AUTO_TEST_CASE(streaming_operator) { { - auto const a = Utils::Array{1}; + auto const a = Utils::Array{{{1}}}; std::stringstream ss; ss << a; @@ -138,7 +138,7 @@ BOOST_AUTO_TEST_CASE(streaming_operator) { } { - auto const a = Utils::Array{1, 2, 3}; + auto const a = Utils::Array{{{1, 2, 3}}}; std::stringstream ss; ss << a; @@ -149,7 +149,7 @@ BOOST_AUTO_TEST_CASE(streaming_operator) { BOOST_AUTO_TEST_CASE(formatter_and_streaming_operator) { { - auto const a = Utils::Array{1}; + auto const a = Utils::Array{{{1}}}; std::stringstream ss; ss << a.formatter("xyz") << a; @@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(formatter_and_streaming_operator) { } { - auto const a = Utils::Array{1, 2, 3}; + auto const a = Utils::Array{{{1, 2, 3}}}; std::stringstream ss; ss << a.formatter(" + ") << a; diff --git a/src/utils/tests/Bag_test.cpp b/src/utils/tests/Bag_test.cpp index 387aa6e07a2..66fdfb8fa44 100644 --- a/src/utils/tests/Bag_test.cpp +++ b/src/utils/tests/Bag_test.cpp @@ -42,7 +42,7 @@ BOOST_AUTO_TEST_CASE(constructor_) { BOOST_AUTO_TEST_CASE(insert_) { /* Copy insert */ { - auto const elements = std::array{1, 2, 3}; + auto const elements = std::array{{1, 2, 3}}; auto bag = Utils::Bag(); /* Elements can be inserted into the bag */ @@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(insert_) { } BOOST_AUTO_TEST_CASE(erase_) { - auto const elements = std::array{1, 2, 3}; + auto const elements = std::array{{1, 2, 3}}; { /* Given a bag with elements */ @@ -126,7 +126,7 @@ BOOST_AUTO_TEST_CASE(erase_) { } BOOST_AUTO_TEST_CASE(size_) { - auto const elements = std::array{1, 2, 3, 5, 6}; + auto const elements = std::array{{1, 2, 3, 5, 6}}; /* Given a bag with elements */ auto bag = Utils::Bag(); @@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE(size_) { } BOOST_AUTO_TEST_CASE(iterator_range_) { - auto const elements = std::array{1, 2, 3, 5, 6}; + auto const elements = std::array{{1, 2, 3, 5, 6}}; /* Given a bag with elements */ auto bag = Utils::Bag(); @@ -192,7 +192,7 @@ BOOST_AUTO_TEST_CASE(reserve_) { } BOOST_AUTO_TEST_CASE(resize_) { - auto const elements = std::array{1, 2, 3, 5, 6}; + auto 
const elements = std::array{{1, 2, 3, 5, 6}}; /* Given a bag with elements */ auto bag = Utils::Bag(); @@ -212,8 +212,8 @@ BOOST_AUTO_TEST_CASE(resize_) { } BOOST_AUTO_TEST_CASE(swap_) { - auto const elements1 = std::array{1, 2, 3}; - auto const elements2 = std::array{1, 2, 3}; + auto const elements1 = std::array{{1, 2, 3}}; + auto const elements2 = std::array{{1, 2, 3}}; /* Given two bags with elements */ auto bag1 = Utils::Bag(); @@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE(swap_) { } BOOST_AUTO_TEST_CASE(serialize_) { - auto const elements = std::array{1, 2, 3, 5, 6}; + auto const elements = std::array{{1, 2, 3, 5, 6}}; /* Given a bag with elements */ auto bag = Utils::Bag(); diff --git a/src/utils/tests/RunningAverage_test.cpp b/src/utils/tests/RunningAverage_test.cpp index 1c4989df661..f9997131fa8 100644 --- a/src/utils/tests/RunningAverage_test.cpp +++ b/src/utils/tests/RunningAverage_test.cpp @@ -23,10 +23,11 @@ #define BOOST_TEST_DYN_LINK #include -#include "utils/statistics/RunningAverage.hpp" - #include "random_sequence.hpp" +#include +#include + #include #include #include @@ -80,9 +81,8 @@ BOOST_AUTO_TEST_CASE(simple_variance_check) { } BOOST_AUTO_TEST_CASE(mean_and_variance) { + auto constexpr sample_size = sizeof(RandomSequence::values) / sizeof(double); Utils::Statistics::RunningAverage running_average; - const std::size_t sample_size = - sizeof(RandomSequence::values) / sizeof(double); for (auto const &val : RandomSequence::values) { running_average.add_sample(val); @@ -91,18 +91,19 @@ BOOST_AUTO_TEST_CASE(mean_and_variance) { BOOST_CHECK(running_average.n() == sample_size); /* Directly calculate the mean from the data */ - const double m_mean = std::accumulate(std::begin(RandomSequence::values), - std::end(RandomSequence::values), 0.0) / - sample_size; + const double mean = std::accumulate(std::begin(RandomSequence::values), + std::end(RandomSequence::values), 0.0) / + sample_size; - BOOST_CHECK(std::fabs(running_average.avg() - m_mean) <= 1e-12); + BOOST_CHECK_SMALL((running_average.avg() - mean), 1e-12); /* Directly calculate the variance from the data */ - double m_var = 0.0; - for (auto const &val : RandomSequence::values) { - m_var += (val - m_mean) * (val - m_mean); - } - m_var /= sample_size; - - BOOST_CHECK(std::fabs(running_average.var() - m_var) <= 1e-12); + auto const var = std::accumulate(std::begin(RandomSequence::values), + std::end(RandomSequence::values), 0.0, + [=](double acc, double val) { + return acc + Utils::sqr(val - mean); + }) / + sample_size; + + BOOST_CHECK_SMALL((running_average.var() - var), 1e-12); } diff --git a/src/utils/tests/Vector_test.cpp b/src/utils/tests/Vector_test.cpp index c49ebb22548..da66e53b182 100644 --- a/src/utils/tests/Vector_test.cpp +++ b/src/utils/tests/Vector_test.cpp @@ -171,15 +171,15 @@ BOOST_AUTO_TEST_CASE(algebraic_operators) { BOOST_CHECK(((v1 * 2) == Utils::Vector3i{2, 4, 6})); { - Utils::Vector3i v1{2, 4, 6}; - auto v2 = 2 * v1; - BOOST_CHECK(v2 == (v1 *= 2)); + Utils::Vector3i v3{2, 4, 6}; + auto v4 = 2 * v3; + BOOST_CHECK(v4 == (v3 *= 2)); } { - Utils::Vector3i v1{2, 4, 6}; - auto v2 = v1 / 2; - BOOST_CHECK(v2 == (v1 /= 2)); + Utils::Vector3i v3{2, 4, 6}; + auto v4 = v3 / 2; + BOOST_CHECK(v4 == (v3 /= 2)); } BOOST_CHECK((sqrt(Utils::Vector3d{1., 2., 3.}) == @@ -187,14 +187,14 @@ BOOST_AUTO_TEST_CASE(algebraic_operators) { /* modulo */ { - Utils::Vector3i v1{2, 7, 8}; - Utils::Vector3i v2{1, 2, 3}; + Utils::Vector3i v3{2, 7, 8}; + Utils::Vector3i v4{1, 2, 3}; - auto const res = v1 % v2; + auto const res = v3 % v4; - 
BOOST_CHECK_EQUAL(res[0], v1[0] % v2[0]); - BOOST_CHECK_EQUAL(res[1], v1[1] % v2[1]); - BOOST_CHECK_EQUAL(res[2], v1[2] % v2[2]); + BOOST_CHECK_EQUAL(res[0], v3[0] % v4[0]); + BOOST_CHECK_EQUAL(res[1], v3[1] % v4[1]); + BOOST_CHECK_EQUAL(res[2], v3[2] % v4[2]); } } diff --git a/src/utils/tests/bspline_test.cpp b/src/utils/tests/bspline_test.cpp index 391b8192629..0aa95c41d81 100644 --- a/src/utils/tests/bspline_test.cpp +++ b/src/utils/tests/bspline_test.cpp @@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(bspline_normalization, T, test_bspline_orders) { // check that B-splines are normalized constexpr auto order = T::value; constexpr auto tol = 1e-10; - constexpr std::array x_values{-0.49999, 0.25, 0., 0.25, 0.49999}; + constexpr std::array x_values{{-0.49999, 0.25, 0., 0.25, 0.49999}}; for (auto const x : x_values) { double sum = 0; @@ -52,7 +52,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(bspline_symmetry, T, test_bspline_orders) { constexpr auto order = T::value; constexpr auto order_mid = (order % 2 == 0) ? order / 2 : (order + 1) / 2; constexpr auto tol = 1e-10; - constexpr std::array x_values{-0.49999, 0.25, 0.1}; + constexpr std::array x_values{{-0.49999, 0.25, 0.1}}; for (int i = 0; i < order_mid; ++i) { for (auto const x : x_values) { @@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(bspline_derivatives, T, test_bspline_orders) { // check that B-splines derivatives are correct constexpr auto order = T::value; constexpr auto tol = 1e-8; - constexpr std::array x_values{-0.49999, 0.25, 0., 0.25, 0.49999}; + constexpr std::array x_values{{-0.49999, 0.25, 0., 0.25, 0.49999}}; // approximate a derivative using the two-point central difference formula auto bspline_d_approx = [](int i, double x, int order) { diff --git a/src/utils/tests/flatten_test.cpp b/src/utils/tests/flatten_test.cpp index ecdc5a04d64..f0eb1894cb3 100644 --- a/src/utils/tests/flatten_test.cpp +++ b/src/utils/tests/flatten_test.cpp @@ -34,7 +34,7 @@ BOOST_AUTO_TEST_CASE(flatten_) { /* not nested */ { - const std::array in = {1, 2, 3, 4}; + const std::array in = {{1, 2, 3, 4}}; std::array out{}; flatten(in, out.begin()); BOOST_CHECK_EQUAL_COLLECTIONS(in.begin(), in.end(), out.begin(), out.end()); @@ -42,22 +42,22 @@ BOOST_AUTO_TEST_CASE(flatten_) { /* nested */ { - const std::array, 2> in{{{1, 2}, {3, 4}}}; + const std::array, 2> in{{{{1, 2}}, {{3, 4}}}}; std::array out{}; flatten(in, out.begin()); - const std::array expected = {1, 2, 3, 4}; + const std::array expected = {{1, 2, 3, 4}}; BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), out.begin(), out.end()); } { - const std::vector in = {1, 2, 3, 4}; + const std::vector in = {{1, 2, 3, 4}}; std::vector out; flatten(in, std::back_inserter(out)); BOOST_CHECK_EQUAL_COLLECTIONS(in.begin(), in.end(), out.begin(), out.end()); } { - const std::vector in = {1, 2, 3, 4}; + const std::vector in = {{1, 2, 3, 4}}; std::list out; flatten(in, std::front_inserter(out)); BOOST_CHECK_EQUAL_COLLECTIONS(in.rbegin(), in.rend(), out.begin(), diff --git a/src/utils/tests/quaternion_test.cpp b/src/utils/tests/quaternion_test.cpp index 360a8c853bb..d6d248adcac 100644 --- a/src/utils/tests/quaternion_test.cpp +++ b/src/utils/tests/quaternion_test.cpp @@ -34,9 +34,9 @@ constexpr int x = 2; constexpr int y = 3; constexpr int z = 4; -Utils::Quaternion scalar_quat{w, 0, 0, 0}; -Utils::Quaternion full_quat{w, x, y, z}; -Utils::Quaternion vector_quat{0, x, y, z}; +Utils::Quaternion scalar_quat{{{{w, 0, 0, 0}}}}; +Utils::Quaternion full_quat{{{{w, x, y, z}}}}; +Utils::Quaternion vector_quat{{{{0, 
x, y, z}}}}; BOOST_AUTO_TEST_CASE(multiply_quaternions) { /* identities */ @@ -50,7 +50,7 @@ BOOST_AUTO_TEST_CASE(multiply_quaternions) { -vector_quat.norm2() * Utils::Quaternion::identity()); /* other */ - Utils::Quaternion const reference_quat{{-4, -20, -30, -40}}; + Utils::Quaternion const reference_quat{{{{-4, -20, -30, -40}}}}; BOOST_CHECK(full_quat * full_quat == reference_quat); } @@ -79,13 +79,13 @@ BOOST_AUTO_TEST_CASE(convert_director_to_quaternion) { #define CHECK_QUAT(input, ref) \ BOOST_CHECK_LE((convert_director_to_quaternion(input) - (ref)).norm2(), eps); /* identities */ - CHECK_QUAT((Vector3d{{0, 0, 0}}), (Quat{{1, 0, 0, 0}})); - CHECK_QUAT((Vector3d{{0, 0, +1}}), (Quat{{1, 0, 0, 0}})); - CHECK_QUAT((Vector3d{{0, 0, -1}}), (Quat{{0, -1, 0, 0}})); - CHECK_QUAT((Vector3d{{+1, 0, 0}}), (Quat{{+1, -1, +1, -1}} / 2.)); - CHECK_QUAT((Vector3d{{-1, 0, 0}}), (Quat{{-1, +1, +1, -1}} / 2.)); - CHECK_QUAT((Vector3d{{0, +1, 0}}), (Quat{{+1, -1, 0, 0}} * cos_pi_4)); - CHECK_QUAT((Vector3d{{0, -1, 0}}), (Quat{{0, 0, +1, -1}} * cos_pi_4)); + CHECK_QUAT((Vector3d{{0, 0, 0}}), (Quat{{{{1, 0, 0, 0}}}})); + CHECK_QUAT((Vector3d{{0, 0, +1}}), (Quat{{{{1, 0, 0, 0}}}})); + CHECK_QUAT((Vector3d{{0, 0, -1}}), (Quat{{{{0, -1, 0, 0}}}})); + CHECK_QUAT((Vector3d{{+1, 0, 0}}), (Quat{{{{+1, -1, +1, -1}}}} / 2.)); + CHECK_QUAT((Vector3d{{-1, 0, 0}}), (Quat{{{{-1, +1, +1, -1}}}} / 2.)); + CHECK_QUAT((Vector3d{{0, +1, 0}}), (Quat{{{{+1, -1, 0, 0}}}} * cos_pi_4)); + CHECK_QUAT((Vector3d{{0, -1, 0}}), (Quat{{{{0, 0, +1, -1}}}} * cos_pi_4)); /* self-consistency */ using Utils::convert_quaternion_to_director; for (int i = -2; i < 3; ++i) { @@ -108,18 +108,18 @@ BOOST_AUTO_TEST_CASE(convert_director_to_quaternion) { } BOOST_AUTO_TEST_CASE(quat_type) { - Utils::Quaternion test{1, 2, 3, 4}; + Utils::Quaternion test{{{{1, 2, 3, 4}}}}; BOOST_CHECK(test[0] == 1); test.normalize(); BOOST_CHECK_LE(test.norm() - 1.0, std::numeric_limits::epsilon()); BOOST_CHECK((Utils::Quaternion::identity() == - Utils::Quaternion{1, 0, 0, 0})); - BOOST_CHECK( - (Utils::Quaternion::zero() == Utils::Quaternion{0, 0, 0, 0})); - BOOST_CHECK((Utils::Quaternion{1, 0, 0, 0} == - Utils::Quaternion{2, 0, 0, 0}.normalized())); + Utils::Quaternion{{{{1, 0, 0, 0}}}})); + BOOST_CHECK((Utils::Quaternion::zero() == + Utils::Quaternion{{{{0, 0, 0, 0}}}})); + BOOST_CHECK((Utils::Quaternion{{{{1, 0, 0, 0}}}} == + Utils::Quaternion{{{{2, 0, 0, 0}}}}.normalized())); BOOST_CHECK_SMALL( - (Utils::Quaternion{2, 1, 3, 4}.normalized().norm() - 1.0), + (Utils::Quaternion{{{{2, 1, 3, 4}}}}.normalized().norm() - 1.0), std::numeric_limits::epsilon()); } diff --git a/src/utils/tests/rotation_matrix_test.cpp b/src/utils/tests/rotation_matrix_test.cpp index 8354f7f2268..cb1745e1f7e 100644 --- a/src/utils/tests/rotation_matrix_test.cpp +++ b/src/utils/tests/rotation_matrix_test.cpp @@ -41,9 +41,9 @@ BOOST_AUTO_TEST_CASE(rotation_matrix_test) { auto const axis = Vector3d{1., 2., 3.}.normalize(); auto const angle = 0.7; - auto const q = - Quaternion{cos(angle / 2), sin(angle / 2) * axis[0], - sin(angle / 2) * axis[1], sin(angle / 2) * axis[2]}; + auto const q = Quaternion{ + {{{cos(angle / 2), sin(angle / 2) * axis[0], sin(angle / 2) * axis[1], + sin(angle / 2) * axis[2]}}}}; auto const M = rotation_matrix(q); auto const v = Vector3d{3., 2., 1.}; diff --git a/src/utils/tests/sampling_test.cpp b/src/utils/tests/sampling_test.cpp index 6d5b3b2bf48..d2ac94376df 100644 --- a/src/utils/tests/sampling_test.cpp +++ b/src/utils/tests/sampling_test.cpp @@ -36,28 +36,33 @@ 
BOOST_AUTO_TEST_CASE(get_cylindrical_sampling_positions_test) { auto const max_phi = Utils::pi(); auto const min_z = 0.0; auto const max_z = 10.0; - auto const n_r_bins = 10; - auto const n_phi_bins = 10; - auto const n_z_bins = 10; + auto const n_r_bins = std::size_t{10}; + auto const n_phi_bins = std::size_t{11}; + auto const n_z_bins = std::size_t{12}; auto const sampling_density = 2.; + auto const sampling_positions = Utils::get_cylindrical_sampling_positions( std::make_pair(min_r, max_r), std::make_pair(min_phi, max_phi), std::make_pair(min_z, max_z), n_r_bins, n_phi_bins, n_z_bins, sampling_density); - std::array, 3> limits{ + + std::array const n_bins{{n_r_bins, n_phi_bins, n_z_bins}}; + std::array, 3> const limits{ {std::make_pair(min_r, max_r), std::make_pair(min_phi, max_phi), std::make_pair(min_z, max_z)}}; - std::array n_bins{{static_cast(n_r_bins), - static_cast(n_phi_bins), - static_cast(n_z_bins)}}; + Utils::CylindricalHistogram histogram(n_bins, limits); for (auto const &p : sampling_positions) { histogram.update(p); } + auto const tot_count = histogram.get_tot_count(); - std::array const dimensions{{n_r_bins, n_phi_bins, n_z_bins}}; - std::array index{}; for (auto const &c : tot_count) { BOOST_CHECK(c > 0); } + for (std::size_t i = 0; i < 3; ++i) { + BOOST_CHECK_EQUAL(histogram.get_n_bins()[i], n_bins[i]); + BOOST_CHECK_EQUAL(histogram.get_limits()[i].first, limits[i].first); + BOOST_CHECK_EQUAL(histogram.get_limits()[i].second, limits[i].second); + } } diff --git a/src/utils/tests/scatter_buffer_test.cpp b/src/utils/tests/scatter_buffer_test.cpp index e4d4a7492bb..1704f3a647a 100644 --- a/src/utils/tests/scatter_buffer_test.cpp +++ b/src/utils/tests/scatter_buffer_test.cpp @@ -24,24 +24,18 @@ #define BOOST_TEST_DYN_LINK #include -#include "utils/mpi/scatter_buffer.hpp" +#include #include #include #include -using Utils::Mpi::scatter_buffer; -namespace mpi = boost::mpi; - -void check_pointer(mpi::communicator comm, int root) { +void check_pointer(boost::mpi::communicator comm, int root) { std::vector buf; - if (comm.rank() == root) { auto const n = comm.size(); - const int total_size = n * (n + 1) / 2; - - std::vector buf; + auto const total_size = n * (n + 1) / 2; for (int i = 1; i <= comm.size(); i++) { for (int j = 0; j < i; j++) { @@ -64,12 +58,12 @@ void check_pointer(mpi::communicator comm, int root) { } BOOST_AUTO_TEST_CASE(pointer) { - mpi::communicator world; + boost::mpi::communicator world; check_pointer(world, 0); } BOOST_AUTO_TEST_CASE(pointer_root) { - mpi::communicator world; + boost::mpi::communicator world; auto root = (world.size() >= 3) ? 
world.size() - 2 : world.size() - 1; check_pointer(world, root); diff --git a/src/utils/tests/sgn_test.cpp b/src/utils/tests/sgn_test.cpp index f40ae7a2ea8..036d4696aef 100644 --- a/src/utils/tests/sgn_test.cpp +++ b/src/utils/tests/sgn_test.cpp @@ -20,11 +20,10 @@ #include #include "utils/math/sgn.hpp" -using Utils::sgn; /* Check that it can be used in constexpr context */ -static_assert(sgn(1), ""); +static_assert(Utils::sgn(1), ""); -BOOST_AUTO_TEST_CASE(pos) { BOOST_CHECK(1 == sgn(89)); } -BOOST_AUTO_TEST_CASE(nul) { BOOST_CHECK(0 == sgn(0)); } -BOOST_AUTO_TEST_CASE(neg) { BOOST_CHECK(-1 == sgn(-89)); } +BOOST_AUTO_TEST_CASE(pos) { BOOST_CHECK_EQUAL(Utils::sgn(89), 1); } +BOOST_AUTO_TEST_CASE(nul) { BOOST_CHECK_EQUAL(Utils::sgn(0), 0); } +BOOST_AUTO_TEST_CASE(neg) { BOOST_CHECK_EQUAL(Utils::sgn(-89), -1); } diff --git a/src/utils/tests/sinc_test.cpp b/src/utils/tests/sinc_test.cpp index fb13e84fb7e..dace2e56b47 100644 --- a/src/utils/tests/sinc_test.cpp +++ b/src/utils/tests/sinc_test.cpp @@ -24,15 +24,13 @@ #include #include -using Utils::sinc; - -BOOST_AUTO_TEST_CASE(zero) { BOOST_CHECK(1.0 == sinc(0.0)); } +BOOST_AUTO_TEST_CASE(zero) { BOOST_CHECK_EQUAL(Utils::sinc(0.0), 1.0); } BOOST_AUTO_TEST_CASE(approx) { for (double x = 0.001; x <= 0.11; x += 0.01) { - auto const approx = sinc(x); + auto const approx = Utils::sinc(x); auto const pi_x = boost::math::constants::pi() * x; auto const exact = std::sin(pi_x) / (pi_x); - BOOST_CHECK(std::abs(approx - exact) <= 1e-13); + BOOST_CHECK_SMALL(approx - exact, 1e-13); } } diff --git a/src/utils/tests/tuple_test.cpp b/src/utils/tests/tuple_test.cpp index de913b5c6ec..2e8c03737da 100644 --- a/src/utils/tests/tuple_test.cpp +++ b/src/utils/tests/tuple_test.cpp @@ -37,7 +37,7 @@ BOOST_AUTO_TEST_CASE(for_each_) { /* l-value reference tuple */ { - auto a = std::array{2, 3, 5}; + auto a = std::array{{2, 3, 5}}; for_each( [i = 0, a](int &e) mutable { @@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(for_each_) { BOOST_AUTO_TEST_CASE(apply_) { /* constexpr */ { - static_assert(Utils::apply(std::plus<>(), std::array{3, 8}) == 11, + static_assert(Utils::apply(std::plus<>(), std::array{{3, 8}}) == 11, ""); } @@ -117,14 +117,14 @@ BOOST_AUTO_TEST_CASE(apply_) { BOOST_AUTO_TEST_CASE(find_if_) { { auto const result = Utils::find_if([](int e) { return e == 2; }, - std::array{1, 2, 3, 4}, + std::array{{1, 2, 3, 4}}, [](int e) { BOOST_CHECK_EQUAL(e, 2); }); BOOST_CHECK(result); } { auto const result = Utils::find_if([](int e) { return e == 5; }, - std::array{1, 2, 3, 4}, + std::array{{1, 2, 3, 4}}, [](int e) { BOOST_CHECK(false); }); BOOST_CHECK(not result); } diff --git a/testsuite/python/CMakeLists.txt b/testsuite/python/CMakeLists.txt index 0f3b5a3316d..be1a68c21e1 100644 --- a/testsuite/python/CMakeLists.txt +++ b/testsuite/python/CMakeLists.txt @@ -13,7 +13,7 @@ function(PYTHON_TEST) set(TEST_FILE ${TEST_FILE_CONFIGURED}) if(NOT DEFINED TEST_MAX_NUM_PROC) - set(TEST_MAX_NUM_PROC ${TEST_NP}) + set(TEST_MAX_NUM_PROC 1) endif() if(${TEST_MAX_NUM_PROC} GREATER ${TEST_NP}) @@ -23,12 +23,14 @@ function(PYTHON_TEST) endif() if(EXISTS ${MPIEXEC}) + set_mpiexec_tmpdir("${TEST_NAME}") add_test( NAME ${TEST_NAME} COMMAND ${MPIEXEC} ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_NUMPROC_FLAG} - ${TEST_NUM_PROC} ${MPIEXEC_PREFLAGS} ${CMAKE_BINARY_DIR}/pypresso - ${PYPRESSO_OPTIONS} ${TEST_FILE} ${MPIEXEC_POSTFLAGS}) + ${TEST_NUM_PROC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_TMPDIR} + ${CMAKE_BINARY_DIR}/pypresso ${PYPRESSO_OPTIONS} ${TEST_FILE} + ${MPIEXEC_POSTFLAGS}) else() add_test(${TEST_NAME} 
${CMAKE_BINARY_DIR}/pypresso ${PYPRESSO_OPTIONS} ${TEST_FILE}) @@ -99,7 +101,7 @@ python_test(FILE dds-and-bh-gpu.py MAX_NUM_PROC 4 LABELS gpu) python_test(FILE electrostaticInteractions.py MAX_NUM_PROC 2) python_test(FILE engine_langevin.py MAX_NUM_PROC 4) python_test(FILE engine_lb.py MAX_NUM_PROC 2 LABELS gpu) -python_test(FILE experimental_decorator.py) +python_test(FILE experimental_decorator.py MAX_NUM_PROC 1) python_test(FILE icc.py MAX_NUM_PROC 4) python_test(FILE mass-and-rinertia_per_particle.py MAX_NUM_PROC 2 LABELS long) python_test(FILE integrate.py MAX_NUM_PROC 4) @@ -123,6 +125,7 @@ python_test(FILE rotational_inertia.py MAX_NUM_PROC 4) python_test(FILE rotational-diffusion-aniso.py MAX_NUM_PROC 1 LABELS long) python_test(FILE rotational_dynamics.py MAX_NUM_PROC 1) python_test(FILE script_interface.py MAX_NUM_PROC 4) +python_test(FILE reaction_methods.py MAX_NUM_PROC 1) python_test(FILE reaction_ensemble.py MAX_NUM_PROC 4) python_test(FILE widom_insertion.py MAX_NUM_PROC 1) python_test(FILE constant_pH.py MAX_NUM_PROC 1) @@ -144,7 +147,7 @@ python_test(FILE nsquare.py MAX_NUM_PROC 4) python_test(FILE virtual_sites_relative.py MAX_NUM_PROC 2) python_test(FILE virtual_sites_tracers.py MAX_NUM_PROC 2) python_test(FILE virtual_sites_tracers_gpu.py MAX_NUM_PROC 2 LABELS gpu) -python_test(FILE domain_decomposition.py MAX_NUM_PROC 4) +python_test(FILE regular_decomposition.py MAX_NUM_PROC 4) python_test(FILE integrator_npt.py MAX_NUM_PROC 4) python_test(FILE integrator_npt_stats.py MAX_NUM_PROC 4 LABELS long) python_test(FILE integrator_steepest_descent.py MAX_NUM_PROC 4) @@ -186,6 +189,7 @@ python_test(FILE analyze_distribution.py MAX_NUM_PROC 1) python_test(FILE observable_profile.py MAX_NUM_PROC 4) python_test(FILE observable_profileLB.py MAX_NUM_PROC 1 LABELS gpu) python_test(FILE rotate_system.py MAX_NUM_PROC 4) +python_test(FILE es_math.py MAX_NUM_PROC 1) python_test(FILE random_pairs.py MAX_NUM_PROC 4) python_test(FILE lb_electrohydrodynamics.py MAX_NUM_PROC 4 LABELS gpu) python_test(FILE cluster_analysis.py MAX_NUM_PROC 4) @@ -218,10 +222,11 @@ python_test(FILE sigint.py DEPENDENCIES sigint_child.py MAX_NUM_PROC 1) python_test(FILE lb_density.py MAX_NUM_PROC 1) python_test(FILE observable_chain.py MAX_NUM_PROC 4) python_test(FILE mpiio.py MAX_NUM_PROC 4) +python_test(FILE mpiio_exceptions.py MAX_NUM_PROC 1) python_test(FILE gpu_availability.py MAX_NUM_PROC 2 LABELS gpu) python_test(FILE features.py MAX_NUM_PROC 1) python_test(FILE decorators.py MAX_NUM_PROC 1) -python_test(FILE galilei.py MAX_NUM_PROC 32) +python_test(FILE galilei.py MAX_NUM_PROC 4) python_test(FILE linear_momentum.py MAX_NUM_PROC 4) python_test(FILE linear_momentum_lb.py MAX_NUM_PROC 2 LABELS gpu) python_test(FILE mmm1d.py MAX_NUM_PROC 2) diff --git a/testsuite/python/actor.py b/testsuite/python/actor.py index 9789f59573f..897bf1c7ba0 100644 --- a/testsuite/python/actor.py +++ b/testsuite/python/actor.py @@ -44,10 +44,10 @@ def _set_params_in_es_core(self): self._core_args = self._params def valid_keys(self): - return "a", "b", "c" + return {"a", "b", "c"} def required_keys(self): - return "a", "c" + return {"a", "c"} def default_params(self): return {"a": False, "b": False, "c": False} @@ -108,6 +108,21 @@ def test_deactivation(self): self.assertEqual(params["b"], False) self.assertEqual(params["c"], True) + def test_exception(self): + error_msg_valid = (r"Only the following keys can be given as keyword arguments: " + r"\['a', 'b', 'c'\], got \['a', 'c', 'd'\] \(unknown \['d'\]\)") + error_msg_required = 
(r"The following keys have to be given as keyword arguments: " + r"\['a', 'c'\], got \['a'\] \(missing \['c'\]\)") + with self.assertRaisesRegex(ValueError, error_msg_valid): + TestActor(a=True, c=True, d=True) + with self.assertRaisesRegex(ValueError, error_msg_required): + TestActor(a=True) + valid_actor = TestActor(a=True, c=True) + with self.assertRaisesRegex(ValueError, error_msg_valid): + valid_actor.set_params(a=True, c=True, d=True) + with self.assertRaisesRegex(ValueError, error_msg_required): + valid_actor.set_params(a=True) + class ActorsTest(ut.TestCase): diff --git a/testsuite/python/brownian_dynamics.py b/testsuite/python/brownian_dynamics.py index b8e25c149eb..bdc663f5bd7 100644 --- a/testsuite/python/brownian_dynamics.py +++ b/testsuite/python/brownian_dynamics.py @@ -26,7 +26,7 @@ class BrownianThermostat(ut.TestCase): """Test Brownian Dynamics""" system = espressomd.System(box_l=[1.0, 1.0, 1.0]) - system.cell_system.set_domain_decomposition(use_verlet_lists=True) + system.cell_system.set_regular_decomposition(use_verlet_lists=True) system.cell_system.skin = 0 system.periodicity = [0, 0, 0] diff --git a/testsuite/python/brownian_dynamics_stats.py b/testsuite/python/brownian_dynamics_stats.py index 470e48fdeaf..c614c17a7cc 100644 --- a/testsuite/python/brownian_dynamics_stats.py +++ b/testsuite/python/brownian_dynamics_stats.py @@ -30,7 +30,7 @@ class BrownianThermostat(ut.TestCase, thermostats_common.ThermostatsCommon): """Tests velocity distributions and diffusion for Brownian Dynamics""" system = espressomd.System(box_l=[1.0, 1.0, 1.0]) - system.cell_system.set_domain_decomposition(use_verlet_lists=True) + system.cell_system.set_regular_decomposition(use_verlet_lists=True) system.cell_system.skin = 0 system.periodicity = [0, 0, 0] diff --git a/testsuite/python/cellsystem.py b/testsuite/python/cellsystem.py index a5a2b5358f9..c7f4b72070a 100644 --- a/testsuite/python/cellsystem.py +++ b/testsuite/python/cellsystem.py @@ -30,14 +30,15 @@ def test_cell_system(self): self.system.cell_system.set_n_square(use_verlet_lists=False) s = self.system.cell_system.get_state() self.assertEqual([s['use_verlet_list'], s['type']], [0, "nsquare"]) - self.system.cell_system.set_domain_decomposition(use_verlet_lists=True) + self.system.cell_system.set_regular_decomposition( + use_verlet_lists=True) s = self.system.cell_system.get_state() self.assertEqual( - [s['use_verlet_list'], s['type']], [1, "domain_decomposition"]) + [s['use_verlet_list'], s['type']], [1, "regular_decomposition"]) @ut.skipIf(n_nodes == 1, "Skipping test: only runs for n_nodes >= 2") def test_node_grid(self): - self.system.cell_system.set_domain_decomposition() + self.system.cell_system.set_regular_decomposition() for i in range(3): node_grid_ref = [1, 1, 1] node_grid_ref[i] = self.n_nodes diff --git a/testsuite/python/collision_detection.py b/testsuite/python/collision_detection.py index b4ab3a5bbdb..8a5930c97a1 100644 --- a/testsuite/python/collision_detection.py +++ b/testsuite/python/collision_detection.py @@ -61,6 +61,7 @@ def get_state_set_state_consistency(self): def test_00_interface_and_defaults(self): # Is it off by default self.assertEqual(self.system.collision_detection.mode, "off") + # Make sure params cannot be set individually with self.assertRaises(Exception): self.system.collision_detection.mode = "bind_centers" @@ -69,6 +70,7 @@ def test_00_interface_and_defaults(self): for unknown_mode in (0, "unknown"): with self.assertRaisesRegex(Exception, "Mode not handled"): 
self.system.collision_detection.set_params(mode=unknown_mode) + self.assertIsNone(self.system.collision_detection.call_method("none")) # That should work self.system.collision_detection.set_params(mode="off") @@ -635,7 +637,7 @@ def test_bind_three_particles(self): system.part.add(id=4, pos=e) system.part.add(id=1, pos=b) - system.cell_system.set_domain_decomposition() + system.cell_system.set_regular_decomposition() system.integrator.run(1, recalc_forces=True) self.verify_triangle_binding(cutoff, system.bonded_inter[2], res) system.cell_system.set_n_square() diff --git a/testsuite/python/constant_pH.py b/testsuite/python/constant_pH.py index b0e1d0f14d4..37d25fe5320 100644 --- a/testsuite/python/constant_pH.py +++ b/testsuite/python/constant_pH.py @@ -54,25 +54,25 @@ def test_ideal_alpha(self): RE = espressomd.reaction_ensemble.ConstantpHEnsemble( kT=1.0, exclusion_radius=1, - seed=44) + seed=44, + constant_pH=pH) RE.add_reaction( gamma=10**(-pKa), reactant_types=[types["HA"]], product_types=[types["A-"], types["H+"]], default_charges=charges_dict) - RE.constant_pH = pH # Set the hidden particle type to the lowest possible number to speed # up the simulation - RE.set_non_interacting_type(max(types.values()) + 1) + RE.set_non_interacting_type(type=max(types.values()) + 1) # equilibration - RE.reaction(800) + RE.reaction(reaction_steps=800) # sampling alphas = [] for _ in range(80): - RE.reaction(15) + RE.reaction(reaction_steps=15) num_H = system.number_of_particles(type=types["H+"]) num_HA = system.number_of_particles(type=types["HA"]) num_A = system.number_of_particles(type=types["A-"]) diff --git a/testsuite/python/constant_pH_stats.py b/testsuite/python/constant_pH_stats.py index b2671a8e3cd..f7d05385c45 100644 --- a/testsuite/python/constant_pH_stats.py +++ b/testsuite/python/constant_pH_stats.py @@ -52,7 +52,7 @@ class ReactionEnsembleTest(ut.TestCase): system.cell_system.skin = 0.4 system.time_step = 0.01 RE = espressomd.reaction_ensemble.ConstantpHEnsemble( - kT=1.0, exclusion_radius=1, seed=44) + kT=1.0, exclusion_radius=1, seed=44, constant_pH=pH) @classmethod def setUpClass(cls): @@ -65,7 +65,6 @@ def setUpClass(cls): reactant_types=[cls.types["HA"]], product_types=[cls.types["A-"], cls.types["H+"]], default_charges=cls.charges_dict) - cls.RE.constant_pH = cls.pH @classmethod def ideal_alpha(cls, pH): @@ -79,18 +78,18 @@ def test_ideal_titration_curve(self): # Set the hidden particle type to the lowest possible number to speed # up the simulation - RE.set_non_interacting_type(max(types.values()) + 1) + RE.set_non_interacting_type(type=max(types.values()) + 1) # chemical warmup - get close to chemical equilibrium before we start # sampling - RE.reaction(40 * N0) + RE.reaction(reaction_steps=40 * N0) average_NH = 0.0 average_NHA = 0.0 average_NA = 0.0 num_samples = 1000 for _ in range(num_samples): - RE.reaction(10) + RE.reaction(reaction_steps=10) average_NH += system.number_of_particles(type=types["H+"]) average_NHA += system.number_of_particles(type=types["HA"]) average_NA += system.number_of_particles(type=types["A-"]) diff --git a/testsuite/python/coulomb_mixed_periodicity.py b/testsuite/python/coulomb_mixed_periodicity.py index d5b276094ea..706f6774690 100644 --- a/testsuite/python/coulomb_mixed_periodicity.py +++ b/testsuite/python/coulomb_mixed_periodicity.py @@ -76,7 +76,7 @@ def test_elc(self): for p in self.system.part: assert p.pos[2] >= 0. 
and p.pos[2] <= 9., f'particle {p.id} in gap' - self.system.cell_system.set_domain_decomposition() + self.system.cell_system.set_regular_decomposition() self.system.cell_system.node_grid = sorted( self.system.cell_system.node_grid, key=lambda x: -x) self.system.periodicity = [1, 1, 1] @@ -95,7 +95,7 @@ def test_elc(self): 'Skipping test: missing feature SCAFACOS or p2nfft method') def test_scafacos_p2nfft(self): self.system.periodicity = [1, 1, 0] - self.system.cell_system.set_domain_decomposition() + self.system.cell_system.set_regular_decomposition() scafacos = espressomd.electrostatics.Scafacos( prefactor=1, diff --git a/testsuite/python/ek_charged_plate.py b/testsuite/python/ek_charged_plate.py index 6b13be4381b..d8976d4b161 100644 --- a/testsuite/python/ek_charged_plate.py +++ b/testsuite/python/ek_charged_plate.py @@ -164,11 +164,18 @@ def test(self): negative_ions[i, j, 30].density = 0.0 # Test error when trying to change ekin parameters after initialisation - ek._params.update({'agrid': 3, - 'T': 0.01}) with self.assertRaises(RuntimeError): + ek._params.update({'agrid': 3, 'T': 0.01}) ek._set_params_in_es_core() + # Check errors from the constructor + with self.assertRaisesRegex(ValueError, r"The following keys have to be given as keyword arguments: " + r"\[.+\], got \[.+\] \(missing \['D'\]\)"): + espressomd.electrokinetics.Species(density=0, valency=1) + with self.assertRaisesRegex(ValueError, r"Only the following keys can be given as keyword arguments: " + r"\[.+\], got \[.+\] \(unknown \['U'\]\)"): + espressomd.electrokinetics.Species(density=0, valency=1, D=0, U=1) + if __name__ == "__main__": ut.main() diff --git a/testsuite/python/ek_eof_one_species.py b/testsuite/python/ek_eof_one_species.py index bc2aa3ed084..38af417e236 100644 --- a/testsuite/python/ek_eof_one_species.py +++ b/testsuite/python/ek_eof_one_species.py @@ -156,7 +156,7 @@ def bisection(): pntm = pnt0 + size else: sys.exit("Bisection method fails:\n" - "Tuning of domain boundaries may be required.") + "Tuning of regular boundaries may be required.") return pntm diff --git a/testsuite/python/elc_vs_analytic.py b/testsuite/python/elc_vs_analytic.py index 7805e8a26d6..333061ba6ad 100644 --- a/testsuite/python/elc_vs_analytic.py +++ b/testsuite/python/elc_vs_analytic.py @@ -54,7 +54,7 @@ def test_elc(self): q=-self.q[0]) self.system.box_l = [self.box_l, self.box_l, self.box_l + self.elc_gap] - self.system.cell_system.set_domain_decomposition( + self.system.cell_system.set_regular_decomposition( use_verlet_lists=True) self.system.periodicity = [1, 1, 1] p3m = espressomd.electrostatics.P3M(prefactor=self.prefactor, diff --git a/testsuite/python/electrostaticInteractions.py b/testsuite/python/electrostaticInteractions.py index e818bebfbd0..a9054f7ee3b 100644 --- a/testsuite/python/electrostaticInteractions.py +++ b/testsuite/python/electrostaticInteractions.py @@ -242,6 +242,14 @@ def test_rf_exceptions(self): self.system.actors.add(rf) self.system.actors.clear() + valid_actor = espressomd.electrostatics.ReactionField( + **params, prefactor=1.0) + with self.assertRaisesRegex(Exception, "chosen method does not support tuning"): + valid_actor.tune() + with self.assertRaisesRegex(ValueError, r"Only the following keys can be given as keyword arguments: " + r"\[.+\], got \[.+\] \(unknown \['coulomb_prefactor'\]\)"): + valid_actor.tune(coulomb_prefactor=1.0) + if __name__ == "__main__": ut.main() diff --git a/testsuite/python/es_math.py b/testsuite/python/es_math.py index 4481fc24311..012eb9ccd01 100644 --- 
a/testsuite/python/es_math.py +++ b/testsuite/python/es_math.py @@ -14,6 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . + import numpy as np import unittest as ut import espressomd.math @@ -44,10 +45,8 @@ def test_cylindrical_transformation_parameters(self): center=3 * [42], axis=[0, 1, 0], orientation=[1, 0, 0]) self.check_orthonormality(ctp_full.axis, ctp_full.orientation) - with self.assertRaises(Exception): - ctp_only_center = espressomd.math.CylindricalTransformationParameters( - center=3 * [42]) - ctp_only_center.axis = 3 * [3] + with self.assertRaises(RuntimeError): + espressomd.math.CylindricalTransformationParameters(center=3 * [4]) if __name__ == "__main__": diff --git a/testsuite/python/integrator_exceptions.py b/testsuite/python/integrator_exceptions.py index db1ec33cc27..08360150939 100644 --- a/testsuite/python/integrator_exceptions.py +++ b/testsuite/python/integrator_exceptions.py @@ -65,6 +65,11 @@ def test_stokesian_integrator(self): self.system.integrator.run(0) def test_steepest_descent_integrator(self): + with self.assertRaisesRegex(ValueError, r"The following keys have to be given as keyword arguments: " + r"\['f_max', 'gamma', 'max_displacement'\], got " + r"\['f_max', 'gamma', 'max_d'\] \(missing \['max_displacement'\]\)"): + self.system.integrator.set_steepest_descent( + f_max=0, gamma=0.1, max_d=5) self.system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42) self.system.integrator.set_steepest_descent( f_max=0, gamma=0.1, max_displacement=0.1) diff --git a/testsuite/python/interactions_bonded.py b/testsuite/python/interactions_bonded.py index 792a66d2e7c..c9aac3aa6ee 100644 --- a/testsuite/python/interactions_bonded.py +++ b/testsuite/python/interactions_bonded.py @@ -46,7 +46,9 @@ def setUp(self): self.system.part.add(pos=self.start_pos, type=0) def tearDown(self): + self.system.actors.clear() self.system.part.clear() + self.system.bonded_inter.clear() # Test Harmonic Bond def test_harmonic(self): @@ -78,6 +80,33 @@ def test_fene(self): scalar_r=r, k=fene_k, d_r_max=fene_d_r_max, r_0=fene_r_0), 0.01, fene_r_0 + fene_d_r_max, True) + def test_virtual_bond(self): + # add sentinel harmonic bond, otherwise short-range loop is skipped + hb = espressomd.interactions.HarmonicBond(k=1., r_0=0.1, r_cut=0.5) + vb = espressomd.interactions.Virtual() + self.system.bonded_inter.add(hb) + self.system.bonded_inter.add(vb) + p1, p2 = self.system.part.all() + p1.add_bond((vb, p2)) + + self.system.integrator.run(steps=0, recalc_forces=True) + self.assertEqual(self.system.analysis.energy()["total"], 0.) + np.testing.assert_allclose(np.copy(p1.f), 0., atol=1e-12, rtol=0) + np.testing.assert_allclose(np.copy(p2.f), 0., atol=1e-12, rtol=0) + + @utx.skipIfMissingFeatures(["BOND_CONSTRAINT"]) + def test_rigid_bond(self): + rb = espressomd.interactions.RigidBond(r=1.0, ptol=0.1, vtol=0.1) + self.system.bonded_inter.add(rb) + p1, p2 = self.system.part.all() + p2.pos = p1.pos + np.array([1.0, 0., 0.]) + p1.add_bond((rb, p2)) + + self.system.integrator.run(steps=0, recalc_forces=True) + self.assertEqual(self.system.analysis.energy()["total"], 0.) 
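    # Note on the zero-energy/zero-force assertions in the two new bonded tests:
    # the virtual bond carries no potential, and the rigid bond constraint
    # (r = 1.0) is satisfied exactly by construction because p2 is placed at
    # p1 + [1, 0, 0], so no constraint correction is applied and both the total
    # energy and the particle forces are expected to vanish.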
+ np.testing.assert_allclose(np.copy(p1.f), 0., atol=1e-12, rtol=0) + np.testing.assert_allclose(np.copy(p2.f), 0., atol=1e-12, rtol=0) + @utx.skipIfMissingFeatures(["ELECTROSTATICS"]) def test_coulomb(self): coulomb_k = 1 diff --git a/testsuite/python/interactions_bonded_interface.py b/testsuite/python/interactions_bonded_interface.py index b060f5d5031..dda1e79f3f4 100644 --- a/testsuite/python/interactions_bonded_interface.py +++ b/testsuite/python/interactions_bonded_interface.py @@ -147,6 +147,9 @@ def func(self): .format(bondClass(**params).type_name(), bondId, outParamsRef, outParams)) self.parameterKeys(outBond) + # check no-op + self.assertIsNone(outBond.call_method('unknown')) + return func test_fene = generateTestForBondParams( @@ -250,6 +253,10 @@ def test_exceptions(self): # sanity checks during bond construction with self.assertRaisesRegex(RuntimeError, "Parameter 'r_0' is missing"): espressomd.interactions.HarmonicBond(k=1.) + with self.assertRaisesRegex(ValueError, r"Only the following keys can be given as keyword arguments: " + r"\['k', 'r_0', 'r_cut'\], got \['k', 'r_0', 'rcut'\] " + r"\(unknown \['rcut'\]\)"): + espressomd.interactions.HarmonicBond(k=1., r_0=1., rcut=2.) with self.assertRaisesRegex(ValueError, "Unknown refShape: 'Unknown'"): espressomd.interactions.IBM_Tribend( ind1=0, ind2=1, ind3=2, ind4=3, kb=1.1, refShape='Unknown') diff --git a/testsuite/python/interactions_non-bonded_interface.py b/testsuite/python/interactions_non-bonded_interface.py index 629bf0669a6..d893f28ef79 100644 --- a/testsuite/python/interactions_non-bonded_interface.py +++ b/testsuite/python/interactions_non-bonded_interface.py @@ -17,6 +17,7 @@ # along with this program. If not, see . # import unittest as ut +import unittest_decorators as utx import tests_common import espressomd @@ -156,6 +157,27 @@ def func(self): "k2": 5.0, "mu": 2.0, "nu": 1.0}, "gay_berne") + @utx.skipIfMissingFeatures("LENNARD_JONES") + def test_exceptions(self): + err_msg_required = (r"The following keys have to be given as keyword arguments: " + r"\['cutoff', 'epsilon', 'shift', 'sigma'\], got " + r"\['epsilon', 'sigma'\] \(missing \['cutoff', 'shift'\]\)") + err_msg_valid = (r"Only the following keys can be given as keyword arguments: " + r"\['cutoff', 'epsilon', 'min', 'offset', 'shift', 'sigma'\], got " + r"\['cutoff', 'epsilon', 'shift', 'sigma', 'unknown'\] \(unknown \['unknown'\]\)") + with self.assertRaisesRegex(ValueError, err_msg_required): + espressomd.interactions.LennardJonesInteraction( + epsilon=1., sigma=2.) + with self.assertRaisesRegex(ValueError, err_msg_required): + self.system.non_bonded_inter[0, 0].lennard_jones.set_params( + epsilon=1., sigma=2.) + with self.assertRaisesRegex(ValueError, err_msg_valid): + espressomd.interactions.LennardJonesInteraction( + epsilon=1., sigma=2., cutoff=3., shift=4., unknown=5.) + with self.assertRaisesRegex(ValueError, err_msg_valid): + self.system.non_bonded_inter[0, 0].lennard_jones.set_params( + epsilon=1., sigma=2., cutoff=3., shift=4., unknown=5.) 
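The exception test added above, like the similar new tests for the actor class, the harmonic bond, the electrokinetics species and the polymer positions elsewhere in this patch, asserts two recurring ValueError messages. A minimal sketch of the keyword-validation pattern these tests exercise, with hypothetical helper names (not the actual espressomd implementation):

    def check_required_keys(required, kwargs):
        # illustrative only: reject calls that omit a required keyword argument
        missing = sorted(set(required) - set(kwargs))
        if missing:
            raise ValueError(
                "The following keys have to be given as keyword arguments: "
                f"{sorted(required)}, got {sorted(kwargs)} (missing {missing})")

    def check_valid_keys(valid, kwargs):
        # illustrative only: reject calls that pass an unknown keyword argument
        unknown = sorted(set(kwargs) - set(valid))
        if unknown:
            raise ValueError(
                "Only the following keys can be given as keyword arguments: "
                f"{sorted(valid)}, got {sorted(kwargs)} (unknown {unknown})")

    # e.g. check_valid_keys({'a', 'b', 'c'}, {'a': 1, 'c': 2, 'd': 3}) raises:
    # ValueError: Only the following keys can be given as keyword arguments:
    # ['a', 'b', 'c'], got ['a', 'c', 'd'] (unknown ['d'])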
+ if __name__ == "__main__": ut.main() diff --git a/testsuite/python/langevin_thermostat.py b/testsuite/python/langevin_thermostat.py index 86f19538ab6..ed931f8ccbb 100644 --- a/testsuite/python/langevin_thermostat.py +++ b/testsuite/python/langevin_thermostat.py @@ -26,7 +26,7 @@ class LangevinThermostat(ut.TestCase): """Test Langevin Dynamics""" system = espressomd.System(box_l=[1.0, 1.0, 1.0]) - system.cell_system.set_domain_decomposition(use_verlet_lists=True) + system.cell_system.set_regular_decomposition(use_verlet_lists=True) system.cell_system.skin = 0 system.periodicity = [0, 0, 0] diff --git a/testsuite/python/langevin_thermostat_stats.py b/testsuite/python/langevin_thermostat_stats.py index 604ef46f7a9..6a865bca844 100644 --- a/testsuite/python/langevin_thermostat_stats.py +++ b/testsuite/python/langevin_thermostat_stats.py @@ -30,7 +30,7 @@ class LangevinThermostat(ut.TestCase, thermostats_common.ThermostatsCommon): """Tests velocity distributions and diffusion for Langevin Dynamics""" system = espressomd.System(box_l=[1.0, 1.0, 1.0]) - system.cell_system.set_domain_decomposition(use_verlet_lists=True) + system.cell_system.set_regular_decomposition(use_verlet_lists=True) system.cell_system.skin = 0 system.periodicity = [0, 0, 0] diff --git a/testsuite/python/lb_boundary.py b/testsuite/python/lb_boundary.py index 94a2c7cfb51..15c76eb7a3b 100644 --- a/testsuite/python/lb_boundary.py +++ b/testsuite/python/lb_boundary.py @@ -21,6 +21,7 @@ import espressomd.shapes import espressomd.lbboundaries import itertools +import numpy as np class LBBoundariesBase: @@ -67,6 +68,14 @@ def test_size(self): lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1)) self.assertEqual(lbb.size(), 2) + def test_getters(self): + boundary = espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1) + with self.assertRaisesRegex(RuntimeError, "You probably tried to get the force of an lbboundary that was not added to system.lbboundaries"): + boundary.get_force() + self.system.lbboundaries.add(boundary) + np.testing.assert_equal(np.copy(boundary.get_force()), [0., 0., 0.]) + self.assertIsNone(boundary.call_method('unknown')) + def test_empty(self): lbb = self.system.lbboundaries self.assertTrue(lbb.empty()) diff --git a/testsuite/python/lb_vtk.py b/testsuite/python/lb_vtk.py index bc6bc283b3d..7ed32ebef82 100644 --- a/testsuite/python/lb_vtk.py +++ b/testsuite/python/lb_vtk.py @@ -38,7 +38,7 @@ class TestLBWrite: - system = espressomd.System(box_l=3 * [16]) + system = espressomd.System(box_l=[10, 11, 12]) system.time_step = 0.01 system.cell_system.skin = 0.4 @@ -56,7 +56,7 @@ def set_lbf(self): self.system.lbboundaries.add(espressomd.lbboundaries.LBBoundary( shape=espressomd.shapes.Wall(normal=[1, 0, 0], dist=1.5))) self.system.lbboundaries.add(espressomd.lbboundaries.LBBoundary( - shape=espressomd.shapes.Wall(normal=[-1, 0, 0], dist=-14.5))) + shape=espressomd.shapes.Wall(normal=[-1, 0, 0], dist=-10.5))) return lbf def parse_vtk(self, filepath, name, shape): @@ -85,7 +85,7 @@ def test_vtk(self): if os.path.exists(filepath): os.remove(filepath) - shape = [16, 16, 16] + shape = [10, 11, 12] lbf = self.set_lbf() self.system.integrator.run(100) @@ -108,7 +108,7 @@ def test_vtk(self): lbf.write_vtk_velocity('vtk_out/delme', [1, 1], 3 * [1]) with self.assertRaises(ValueError): lbf.write_vtk_velocity('vtk_out/delme', 3 * [1], np.array([2, 3])) - bb1, bb2 = ([1, 2, 3], [13, 14, 15]) + bb1, bb2 = ([1, 2, 3], [9, 10, 11]) lbf.write_vtk_velocity('vtk_out/velocity_bb.vtk', bb1, bb2) # check VTK files 
exist @@ -156,7 +156,7 @@ def test_print(self): if os.path.exists(filepath): os.remove(filepath) - shape = [16, 16, 16] + shape = [10, 11, 12] lbf = self.set_lbf() self.system.integrator.run(100) @@ -184,10 +184,10 @@ def test_print(self): node_velocity[i, j, k] = node.velocity node_boundary[i, j, k] = node.boundary - seq = np.arange(16) - ref_coord = np.array([np.tile(seq, 16 * 16), - np.tile(np.repeat(seq, 16), 16), - np.repeat(seq, 16 * 16)]).T + ref_coord = np.array([ + np.tile(np.arange(shape[0]), shape[1] * shape[2]), + np.tile(np.repeat(np.arange(shape[1]), shape[0]), shape[2]), + np.repeat(np.arange(shape[2]), shape[0] * shape[1])]).T dat_velocity = np.loadtxt('vtk_out/velocity.dat') dat_coord = (dat_velocity[:, 0:3] - 0.5).astype(int) diff --git a/testsuite/python/lj.py b/testsuite/python/lj.py index 9631323c22c..aaa6ad46e52 100644 --- a/testsuite/python/lj.py +++ b/testsuite/python/lj.py @@ -54,14 +54,15 @@ def check(self): self.assertLess(max_deviation, 1e-5) def test_dd(self): - self.system.cell_system.set_domain_decomposition( + self.system.cell_system.set_regular_decomposition( use_verlet_lists=False) self.system.integrator.run(recalc_forces=True, steps=0) self.check() def test_dd_vl(self): - self.system.cell_system.set_domain_decomposition(use_verlet_lists=True) + self.system.cell_system.set_regular_decomposition( + use_verlet_lists=True) # Build VL and calc ia self.system.integrator.run(recalc_forces=True, steps=0) diff --git a/testsuite/python/mass-and-rinertia_per_particle.py b/testsuite/python/mass-and-rinertia_per_particle.py index d3c1d7c5b34..89fea0d1f63 100644 --- a/testsuite/python/mass-and-rinertia_per_particle.py +++ b/testsuite/python/mass-and-rinertia_per_particle.py @@ -60,7 +60,7 @@ class ThermoTest(ut.TestCase): @classmethod def setUpClass(cls): np.random.seed(seed=15) - cls.system.cell_system.set_domain_decomposition(use_verlet_lists=True) + cls.system.cell_system.set_regular_decomposition(use_verlet_lists=True) cls.system.cell_system.skin = 5.0 def setUp(self): diff --git a/testsuite/python/mmm1d.py b/testsuite/python/mmm1d.py index 1940b88aec5..a63c56afa09 100644 --- a/testsuite/python/mmm1d.py +++ b/testsuite/python/mmm1d.py @@ -45,6 +45,7 @@ class ElectrostaticInteractionsTests: def setUp(self): self.system.periodicity = [0, 0, 1] + self.system.cell_system.set_n_square() self.system.part.add(pos=self.p_pos, q=self.p_q) self.mmm1d = self.MMM1D(prefactor=1.0, maxPWerror=1e-20) self.system.actors.add(self.mmm1d) @@ -110,6 +111,13 @@ def test_exceptions(self): self.system.actors.add(mmm1d) self.system.periodicity = (0, 0, 1) self.system.actors.clear() + if self.MMM1D is espressomd.electrostatics.MMM1D: + with self.assertRaisesRegex(Exception, "MMM1D requires the N-square cellsystem"): + mmm1d = self.MMM1D(prefactor=1.0, maxPWerror=1e-2) + self.system.cell_system.set_regular_decomposition() + self.system.actors.add(mmm1d) + self.system.cell_system.set_n_square() + self.system.actors.clear() @utx.skipIfMissingFeatures(["ELECTROSTATICS"]) diff --git a/testsuite/python/mpiio.py b/testsuite/python/mpiio.py index 1a2b1f34d1a..00a4fab9106 100644 --- a/testsuite/python/mpiio.py +++ b/testsuite/python/mpiio.py @@ -29,16 +29,13 @@ import random import os import dataclasses +import tempfile # Number of particles npart = 10 # Number of different bond types nbonds = 100 -filename = "testdata.mpiio" -exts = ["head", "pref", "id", "type", "pos", "vel", "boff", "bond"] -filenames = [filename + "." 
+ ext for ext in exts] - @dataclasses.dataclass class MockParticle: @@ -49,12 +46,6 @@ class MockParticle: bonds: list -def clean_files(): - for f in filenames: - if os.path.isfile(f): - os.remove(f) - - def randint_different_from(a, b, n): """Returns a random integer in [a, b) that is not n.""" r = n @@ -100,50 +91,144 @@ class MPIIOTest(ut.TestCase): bend=i, phi0=i) test_mock_particles = get_random_mock_particles() - def setUp(self): + @classmethod + def setUpClass(cls): + cls.temp_dir = tempfile.TemporaryDirectory() + + @classmethod + def tearDownClass(cls): + cls.temp_dir.cleanup() + + def add_particles(self): """Sets up a system from test_mock_particles and prepares environment for the tests.""" - clean_files() # Prior call might not have completed successfully for p in self.test_mock_particles: self.system.part.add(id=p.id, type=p.type, pos=p.pos, v=p.v) for b in p.bonds: self.system.part.by_id(p.id).add_bond(b) def tearDown(self): - clean_files() - - def check_files_exist(self): + self.system.part.clear() + + def generate_prefix(self, test_id): + return os.path.join(self.temp_dir.name, test_id.rsplit('.')[-1]) + + def build_list_of_expected_files(self, prefix, **fields): + exts = {'head', 'pref', 'id'} + if fields.get('types', False): + exts.add('type') + if fields.get('positions', False): + exts.add('pos') + if fields.get('velocities', False): + exts.add('vel') + if fields.get('bonds', False): + exts.add('boff') + exts.add('bond') + return {f'{prefix}.{ext}' for ext in exts} + + def check_files_exist(self, prefix, **fields): """Checks if all necessary files have been written.""" - for fn in filenames: - self.assertTrue(os.path.isfile(fn)) + filepaths = self.build_list_of_expected_files(prefix, **fields) + for filepath in filepaths: + self.assertTrue(os.path.isfile(filepath)) - def check_sample_system(self): - """Checks the particles in the ESPResSo system "self.s" against the - true values in "self.test_particles".""" + def check_sample_system(self, **fields): + """Checks particles in the system against the reference values.""" + self.assertEqual(len(self.system.part), len(self.test_mock_particles)) for p, q in zip(self.system.part, self.test_mock_particles): + ref_t = q.type if fields.get('types', False) else 0 + ref_p = q.pos if fields.get('positions', False) else [0., 0., 0.] + ref_v = q.v if fields.get('velocities', False) else [0., 0., 0.] 
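    # Note: fields that were not written to disk are expected to read back as
    # their defaults (type 0, zero position and velocity, no bonds), which is
    # why the reference values above and below fall back to those defaults
    # whenever the corresponding field flag is False.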
+ ref_b = q.bonds if fields.get('bonds', False) else [] self.assertEqual(p.id, q.id) - self.assertEqual(p.type, q.type) - np.testing.assert_array_equal(np.copy(p.pos), q.pos) - np.testing.assert_array_equal(np.copy(p.v), q.v) - self.assertEqual(len(p.bonds), len(q.bonds)) + self.assertEqual(p.type, ref_t) + np.testing.assert_array_equal(np.copy(p.pos), ref_p) + np.testing.assert_array_equal(np.copy(p.v), ref_v) + self.assertEqual(len(p.bonds), len(ref_b)) # Check all bonds - for bp, bq in zip(p.bonds, q.bonds): + for bp, bq in zip(p.bonds, ref_b): # Bond type - "bend" stores the index of the bond self.assertEqual(bp[0].params["bend"], bq[0]) # Bond partners np.testing.assert_array_equal(bp[1:], bq[1:]) def test_mpiio(self): - espressomd.io.mpiio.mpiio.write( - filename, types=True, positions=True, velocities=True, bonds=True) - - self.check_files_exist() - - self.system.part.clear() # Clear to be on the safe side - espressomd.io.mpiio.mpiio.read( - filename, types=True, positions=True, velocities=True, bonds=True) - - self.check_sample_system() + fields = { + 'types': True, + 'positions': True, + 'velocities': True, + 'bonds': True} + prefix = self.generate_prefix(self.id()) + mpiio = espressomd.io.mpiio.Mpiio() + + self.add_particles() + mpiio.write(prefix, **fields) + self.check_files_exist(prefix, **fields) + + self.system.part.clear() + mpiio.read(prefix, **fields) + self.check_sample_system(**fields) + + def test_mpiio_without_positions(self): + prefix = self.generate_prefix(self.id()) + mpiio = espressomd.io.mpiio.Mpiio() + self.add_particles() + mpiio.write(prefix, types=True, positions=False) + self.system.part.clear() + mpiio.read(prefix, types=True, positions=False) + self.check_sample_system(types=True, positions=False) + + def test_mpiio_without_types(self): + prefix = self.generate_prefix(self.id()) + mpiio = espressomd.io.mpiio.Mpiio() + self.add_particles() + mpiio.write(prefix, types=False, positions=True) + self.system.part.clear() + mpiio.read(prefix, types=False, positions=True) + self.check_sample_system(types=False, positions=True) + + def test_mpiio_multiple_instances(self): + fields1 = { + 'types': True, + 'positions': True, + 'velocities': True, + 'bonds': True} + fields2 = { + 'types': True, + 'positions': True, + 'velocities': False, + 'bonds': False} + prefix1 = self.generate_prefix(self.id()) + '.1' + prefix2 = self.generate_prefix(self.id()) + '.2' + mpiio1 = espressomd.io.mpiio.Mpiio() + mpiio2 = espressomd.io.mpiio.Mpiio() + + self.add_particles() + mpiio1.write(prefix1, **fields1) + mpiio2.write(prefix2, **fields2) + self.check_files_exist(prefix1, **fields1) + self.check_files_exist(prefix2, **fields2) + + self.system.part.clear() + mpiio1.read(prefix1, **fields1) + self.check_sample_system(**fields1) + + self.system.part.clear() + mpiio2.read(prefix2, **fields2) + self.check_sample_system(**fields2) + + def test_mpiio_exceptions(self): + mpiio = espressomd.io.mpiio.Mpiio() + prefix = self.generate_prefix(self.id()) + msg_prefix = "Need to supply output prefix via the 'prefix' argument." 
+ with self.assertRaisesRegex(ValueError, msg_prefix): + mpiio.write(None, positions=True) + with self.assertRaisesRegex(ValueError, msg_prefix): + mpiio.read(None, positions=True) + with self.assertRaisesRegex(ValueError, "No output fields chosen."): + mpiio.write(prefix) + with self.assertRaisesRegex(ValueError, "No output fields chosen."): + mpiio.read(prefix) if __name__ == '__main__': diff --git a/testsuite/python/mpiio_exceptions.py b/testsuite/python/mpiio_exceptions.py new file mode 100644 index 00000000000..7f3595cfa12 --- /dev/null +++ b/testsuite/python/mpiio_exceptions.py @@ -0,0 +1,141 @@ +# +# Copyright (C) 2022 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import espressomd +import espressomd.io + +import unittest as ut +import os +import tempfile + + +class MPIIOMockGenerator: + """Mock MPI-IO files.""" + + def __init__(self, root): + self.root = root + self.counter = 0 + + def _create_file(self, fn, mode, content=b''): + with open(os.open(fn, os.O_CREAT | os.O_WRONLY, mode), 'wb') as f: + f.write(content) + + def create(self, *suffixes, read_only=True, from_ref=None): + mode = 0o444 if read_only else 0o777 + filepath = os.path.join(self.root, f'testdata.mpiio.{self.counter}') + filepath_derived = [] + for suffix in suffixes: + derived = f'{filepath}.{suffix}' + content = b'' + if from_ref is not None: + with open(f'{from_ref}.{suffix}', 'rb') as f: + content = f.read() + self._create_file(derived, mode, content) + filepath_derived.append(derived) + if len(filepath_derived) == 1: + filepath_derived = filepath_derived[0] + self.counter += 1 + return filepath, filepath_derived + + +class MPIIOTest(ut.TestCase): + + """ + Test class for the MPI-IO core functionality. + Check for exceptions when data cannot be read or written. + With 1 MPI rank, fatal errors are just exceptions. 
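A note on the MPIIOMockGenerator defined above: create() numbers its files sequentially and, with read_only=True, creates them with mode 0o444 so that later write attempts fail; it returns the derived path as a single string when one suffix is given and as a list otherwise. Illustrative usage, assuming a generator rooted at a hypothetical /tmp/mock directory:

    gen = MPIIOMockGenerator('/tmp/mock')
    path, fn = gen.create('head', read_only=True)
    # path == '/tmp/mock/testdata.mpiio.0', fn == '/tmp/mock/testdata.mpiio.0.head'
    path, fns = gen.create('id', 'pref', read_only=True)
    # fns == ['/tmp/mock/testdata.mpiio.1.id', '/tmp/mock/testdata.mpiio.1.pref']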
+ """ + system = espressomd.system.System(box_l=[1, 1, 1]) + n_nodes = system.cell_system.get_state()["n_nodes"] + + @classmethod + def setUpClass(cls): + cls.temp_dir = tempfile.TemporaryDirectory() + + @classmethod + def tearDownClass(cls): + cls.temp_dir.cleanup() + + @ut.skipIf(n_nodes != 1, "only works on 1 MPI rank") + def test_exceptions(self): + generator = MPIIOMockGenerator(self.temp_dir.name) + mpiio = espressomd.io.mpiio.Mpiio() + + # generate reference data + self.system.part.add(pos=[0, 0, 0]) + path_ref = generator.create()[0] + mpiio.write(path_ref, types=True) + self.system.part.clear() + + # check reference data is valid + mpiio.read(path_ref, types=True) + self.system.part.clear() + + # exception when the metadata cannot be written + path, fn = generator.create('head', read_only=True) + with self.assertRaisesRegex(RuntimeError, f'Could not open file "{fn}"'): + mpiio.write(path, types=True) + + # exception when the payload cannot be written + path, fn = generator.create('pref', read_only=True) + with self.assertRaisesRegex(RuntimeError, f'Could not open file "{fn}"'): + mpiio.write(path, types=True) + + # exception when calculating the size of a non-existent file + path, _ = generator.create(read_only=True) + fn = f'{path}.pref' + with self.assertRaisesRegex(RuntimeError, f'Could not get file size of "{fn}"'): + mpiio.read(path, types=True) + + # exception when the MPI world size differs for reading and writing + # (empty .pref file -> data was written with MPI world size of 0) + path, _ = generator.create('id', 'pref', read_only=True) + with self.assertRaisesRegex(RuntimeError, f'Trying to read a file with a different COMM size than at point of writing'): + mpiio.read(path, types=True) + + # exception when the particle types don't exist + path, _ = generator.create( + 'pref', 'id', 'head', read_only=False, from_ref=path_ref) + fn = f'{path}.type' + with self.assertRaisesRegex(RuntimeError, f'Could not open file "{fn}"'): + mpiio.read(path, types=True) + + # exception when the metadata doesn't exist + path, _ = generator.create( + 'id', 'pref', read_only=False, from_ref=path_ref) + fn = f'{path}.head' + with self.assertRaisesRegex(RuntimeError, f'Could not open file "{fn}"'): + mpiio.read(path, types=True) + + # exception when the metadata is empty + with open(fn, 'wb'): + pass + with self.assertRaisesRegex(RuntimeError, f'Could not read file "{fn}"'): + mpiio.read(path, types=True) + + # exception when reading data that was not written to disk + # (empty .pref file -> data was written with MPI world size of 0) + path, _ = generator.create( + 'id', 'pref', 'head', read_only=False, from_ref=path_ref) + with self.assertRaisesRegex(RuntimeError, f'Requesting to read fields which were not dumped'): + mpiio.read(path, types=True, bonds=True) + + +if __name__ == '__main__': + ut.main() diff --git a/testsuite/python/p3m_tuning_exceptions.py b/testsuite/python/p3m_tuning_exceptions.py index 52781405816..85e9c2b7a3b 100644 --- a/testsuite/python/p3m_tuning_exceptions.py +++ b/testsuite/python/p3m_tuning_exceptions.py @@ -19,6 +19,7 @@ import espressomd import unittest as ut import unittest_decorators as utx +import itertools class P3M_tuning_test(ut.TestCase): @@ -195,6 +196,26 @@ def test_04_invalid_params_p3m_cpu(self): self.check_invalid_params(espressomd.electrostatics.P3M) + # set up a valid actor + solver = espressomd.electrostatics.P3M( + prefactor=2, accuracy=0.1, cao=2, r_cut=3.18, mesh=8) + self.system.actors.add(solver) + + # check periodicity exceptions + for 
periodicity in itertools.product(range(2), range(2), range(2)): + if periodicity == (1, 1, 1): + continue + with self.assertRaisesRegex(Exception, r"P3M requires periodicity \(1, 1, 1\)"): + self.system.periodicity = periodicity + self.system.periodicity = (1, 1, 1) + + # check cell system exceptions + with self.assertRaisesRegex(Exception, "P3M requires the regular decomposition cell system"): + self.system.cell_system.set_n_square() + self.system.analysis.energy() + self.system.cell_system.set_regular_decomposition() + self.system.actors.clear() + @utx.skipIfMissingGPU() @utx.skipIfMissingFeatures("P3M") def test_04_invalid_params_p3m_gpu(self): @@ -215,6 +236,35 @@ def test_04_invalid_params_dp3m_cpu(self): self.check_invalid_params(espressomd.magnetostatics.DipolarP3M) + # check bisection exception + self.system.periodicity = (0, 0, 0) + with self.assertRaisesRegex(Exception, r"Root must be bracketed for bisection in dp3m_rtbisection"): + solver = espressomd.magnetostatics.DipolarP3M( + prefactor=2, accuracy=0.1) + self.system.actors.add(solver) + self.system.periodicity = (1, 1, 1) + self.system.actors.clear() + + # set up a valid actor + solver = espressomd.magnetostatics.DipolarP3M( + prefactor=2, accuracy=0.1, cao=1, r_cut=3.28125, mesh=5) + self.system.actors.add(solver) + + # check periodicity and cell system exceptions + for periodicity in itertools.product(range(2), range(2), range(2)): + if periodicity == (1, 1, 1): + continue + with self.assertRaisesRegex(Exception, r"dipolar P3M requires periodicity \(1, 1, 1\)"): + self.system.periodicity = periodicity + self.system.periodicity = (1, 1, 1) + + # check cell system exceptions + with self.assertRaisesRegex(Exception, "dipolar P3M requires the regular decomposition cell system"): + self.system.cell_system.set_n_square() + self.system.analysis.energy() + self.system.cell_system.set_regular_decomposition() + self.system.actors.clear() + @utx.skipIfMissingFeatures("P3M") def test_04_invalid_params_p3m_elc_cpu(self): import espressomd.electrostatics @@ -324,6 +374,12 @@ def test_04_invalid_params_dp3m_dlc_cpu(self): self.system.actors.add(solver_mdlc) self.system.actors.remove(solver_mdlc) + solver_mdlc = espressomd.magnetostatic_extensions.DLC( + gap_size=1, maxPWerror=1e-30) + with self.assertRaisesRegex(RuntimeError, "MDLC tuning failed: unable to find a proper cut-off for the given accuracy"): + self.system.actors.add(solver_mdlc) + self.system.actors.remove(solver_mdlc) + ########################################################### # block of tests where tuning should not throw exceptions # ########################################################### diff --git a/testsuite/python/pair_criteria.py b/testsuite/python/pair_criteria.py index 95790e0d2a4..87634241603 100644 --- a/testsuite/python/pair_criteria.py +++ b/testsuite/python/pair_criteria.py @@ -57,6 +57,11 @@ def test_distance_crit_non_periodic(self): self.assertTrue(not dc.decide(self.p1, self.p2)) self.assertTrue(not dc.decide(self.p1.id, self.p2.id)) + def test_distance_crit_exceptions(self): + dc = espressomd.pair_criteria.DistanceCriterion(cut_off=0.1) + with self.assertRaises(RuntimeError): + dc.call_method("unknown") + @utx.skipIfMissingFeatures("LENNARD_JONES") def test_energy_crit(self): # Setup purely repulsive lj diff --git a/testsuite/python/pairs.py b/testsuite/python/pairs.py index 8f032b50f3b..a466936980d 100644 --- a/testsuite/python/pairs.py +++ b/testsuite/python/pairs.py @@ -104,13 +104,13 @@ def test_nsquare_partial_z(self): self.run_and_check() def 
test_dd(self): - self.system.cell_system.set_domain_decomposition() + self.system.cell_system.set_regular_decomposition() self.system.periodicity = [1, 1, 1] self.run_and_check() self.check_range_exception() def test_dd_partial_z(self): - self.system.cell_system.set_domain_decomposition() + self.system.cell_system.set_regular_decomposition() self.system.periodicity = [1, 1, 0] self.run_and_check() self.check_range_exception() diff --git a/testsuite/python/particle.py b/testsuite/python/particle.py index d68e391d3ae..812b60e0d0c 100644 --- a/testsuite/python/particle.py +++ b/testsuite/python/particle.py @@ -344,7 +344,7 @@ def test_zz_remove_all(self): def test_coord_fold_corner_cases(self): system = self.system system.time_step = .5 - system.cell_system.set_domain_decomposition(use_verlet_lists=False) + system.cell_system.set_regular_decomposition(use_verlet_lists=False) system.cell_system.skin = 0 system.min_global_cut = 3 system.part.clear() diff --git a/testsuite/python/polymer_linear.py b/testsuite/python/polymer_linear.py index 4c466ff0924..8d4007c3121 100644 --- a/testsuite/python/polymer_linear.py +++ b/testsuite/python/polymer_linear.py @@ -16,7 +16,6 @@ # along with this program. If not, see . import unittest as ut import numpy as np -import random import espressomd import espressomd.polymer import espressomd.shapes @@ -34,7 +33,7 @@ class LinearPolymerPositions(ut.TestCase): """ box_l = 15 - seed = random.randint(0, 1000) + seed = 42 system = espressomd.System(box_l=[box_l, box_l, box_l]) @@ -128,6 +127,7 @@ def test_start_positions(self): num_poly = 90 num_mono = 25 bond_length = 0.83 + np.random.seed(seed=self.seed) start_positions = np.random.random((num_poly, 3)) * self.box_l # make sure that incorrect size leads to error @@ -198,11 +198,20 @@ def test_respect_constraints_wall(self): respect_constraints=True, seed=self.seed) self.system.constraints.remove(wall_constraint) - def test_failure(self): + def test_exceptions(self): """ - Check the runtime error message. + Check runtime error messages. 
""" + with self.assertRaisesRegex(ValueError, r"The following keys have to be given as keyword arguments: " + r"\[.+\], got \[.+\] \(missing \['seed'\]\)"): + espressomd.polymer.linear_polymer_positions( + n_polymers=1, beads_per_chain=10, bond_length=0.1) + with self.assertRaisesRegex(ValueError, r"Only the following keys can be given as keyword arguments: " + r"\[.+\], got \[.+\] \(unknown \['bondangle'\]\)"): + espressomd.polymer.linear_polymer_positions( + n_polymers=1, beads_per_chain=10, bond_length=0.1, seed=10, + bondangle=0.1) with self.assertRaisesRegex(Exception, 'Failed to create polymer positions.'): espressomd.polymer.linear_polymer_positions( n_polymers=1, beads_per_chain=10, diff --git a/testsuite/python/random_pairs.py b/testsuite/python/random_pairs.py index e08b01956b9..5c4b19d2e7c 100644 --- a/testsuite/python/random_pairs.py +++ b/testsuite/python/random_pairs.py @@ -82,7 +82,7 @@ def check_pairs(self, n2_pairs): self.assertEqual(n2_pairs ^ set(cs_pairs), set()) def check_dd(self, n2_pairs): - self.system.cell_system.set_domain_decomposition() + self.system.cell_system.set_regular_decomposition() self.check_pairs(n2_pairs) def check_n_squared(self, n2_pairs): diff --git a/testsuite/python/reaction_ensemble.py b/testsuite/python/reaction_ensemble.py index bc3c23ea378..96f160fd9e9 100644 --- a/testsuite/python/reaction_ensemble.py +++ b/testsuite/python/reaction_ensemble.py @@ -83,8 +83,7 @@ def setUpClass(cls): reactant_coefficients=cls.reactant_coefficients, product_types=cls.product_types, product_coefficients=cls.product_coefficients, - default_charges=cls.charge_dict, - check_for_electroneutrality=True) + default_charges=cls.charge_dict) def test_ideal_titration_curve(self): N0 = ReactionEnsembleTest.N0 @@ -97,18 +96,18 @@ def test_ideal_titration_curve(self): # Set the hidden particle type to the lowest possible number to speed # up the simulation - RE.set_non_interacting_type(max(types.values()) + 1) + RE.set_non_interacting_type(type=max(types.values()) + 1) # chemical warmup - get close to chemical equilibrium before we start # sampling - RE.reaction(5 * N0) + RE.reaction(reaction_steps=5 * N0) average_NH = 0.0 average_NHA = 0.0 average_NA = 0.0 num_samples = 300 for _ in range(num_samples): - RE.reaction(10) + RE.reaction(reaction_steps=10) average_NH += system.number_of_particles(type=types["H+"]) average_NHA += system.number_of_particles(type=types["HA"]) average_NA += system.number_of_particles(type=types["A-"]) @@ -133,84 +132,11 @@ def test_ideal_titration_curve(self): + f" average alpha: {average_alpha:.3f}" + f" target alpha: {target_alpha:.3f}" ) - - def test_reaction_system(self): - RE_status = ReactionEnsembleTest.RE.get_status() - forward_reaction = RE_status["reactions"][0] - for i in range(len(forward_reaction["reactant_types"])): - self.assertEqual( - ReactionEnsembleTest.reactant_types[i], - forward_reaction["reactant_types"][i], - msg="reactant type not set correctly.") - for i in range(len(forward_reaction["reactant_coefficients"])): - self.assertEqual( - ReactionEnsembleTest.reactant_coefficients[i], - forward_reaction["reactant_coefficients"][i], - msg="reactant coefficients not set correctly.") - for i in range(len(forward_reaction["product_types"])): - self.assertEqual( - ReactionEnsembleTest.product_types[i], - forward_reaction["product_types"][i], - msg="product type not set correctly.") - for i in range(len(forward_reaction["product_coefficients"])): - self.assertEqual( - ReactionEnsembleTest.product_coefficients[i], - 
forward_reaction["product_coefficients"][i], - msg="product coefficients not set correctly.") - - self.assertAlmostEqual( - ReactionEnsembleTest.temperature, - RE_status["kT"], - places=9, - msg="reaction ensemble kT not set correctly.") - self.assertAlmostEqual( - ReactionEnsembleTest.exclusion_radius, - RE_status["exclusion_radius"], - places=9, - msg="reaction ensemble exclusion radius not set correctly.") - - self.assertAlmostEqual( - ReactionEnsembleTest.volume, - ReactionEnsembleTest.RE.get_volume(), - places=9, - msg="reaction ensemble volume not set correctly.") - - def test_change_reaction_constant(self): - RE = ReactionEnsembleTest.RE - new_reaction_constant = 634.0 - RE.change_reaction_constant(0, new_reaction_constant) - RE_status = RE.get_status() - forward_reaction = RE_status["reactions"][0] - backward_reaction = RE_status["reactions"][1] - self.assertEqual( - new_reaction_constant, - forward_reaction["gamma"], - msg="new reaction constant was not set correctly.") - self.assertEqual( - 1.0 / new_reaction_constant, - backward_reaction["gamma"], - msg="new reaction constant was not set correctly.") - RE.change_reaction_constant(0, ReactionEnsembleTest.gamma) - - def test_delete_reaction(self): - RE = ReactionEnsembleTest.RE - RE.add_reaction( - gamma=1, - reactant_types=[5], - reactant_coefficients=[1], - product_types=[2, 3, 4], - product_coefficients=[1, 4, 3], - default_charges={5: 0, 2: 0, 3: 0, 4: 0}, - check_for_electroneutrality=True) - nr_reactions_after_addition = len(RE.get_status()["reactions"]) - RE.delete_reaction(1) - nr_reactions_after_deletion = len(RE.get_status()["reactions"]) - self.assertEqual( - 2, - nr_reactions_after_addition - nr_reactions_after_deletion, - msg="the difference in single reactions does not match,\ - deleting a full reaction (back and forward direction)\ - should result in deleting two single reactions.") + # for this setup, the acceptance rate is about 85% + rate0 = RE.get_acceptance_rate_reaction(reaction_id=0) + rate1 = RE.get_acceptance_rate_reaction(reaction_id=1) + self.assertAlmostEqual(rate0, 0.85, delta=0.05) + self.assertAlmostEqual(rate1, 0.85, delta=0.05) if __name__ == "__main__": diff --git a/testsuite/python/reaction_methods.py b/testsuite/python/reaction_methods.py new file mode 100644 index 00000000000..0a29f2eb4d5 --- /dev/null +++ b/testsuite/python/reaction_methods.py @@ -0,0 +1,273 @@ +# +# Copyright (C) 2013-2022 The ESPResSo project +# +# This file is part of ESPResSo. +# +# ESPResSo is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import espressomd +import espressomd.reaction_ensemble + +import unittest as ut + + +class ReactionMethods(ut.TestCase): + + """Test the reaction methods interface.""" + + system = espressomd.System(box_l=[10., 10., 10.]) + system.cell_system.skin = 0.4 + + def tearDown(self): + self.system.part.clear() + + def check_interface(self, method, kT, exclusion_radius, gamma): + def check_reaction_parameters(reactions, parameters): + for reaction, params in zip(reactions, parameters): + for key in reaction.required_keys(): + self.assertEqual(getattr(reaction, key), params[key]) + + reaction_forward = { + 'gamma': gamma, + 'reactant_types': [5], + 'reactant_coefficients': [1], + 'product_types': [2, 3], + 'product_coefficients': [1, 1], + 'default_charges': {5: 0, 2: 0, 3: 0}, + } + reaction_backward = { + 'gamma': 1. / gamma, + 'reactant_types': reaction_forward['product_types'], + 'reactant_coefficients': reaction_forward['product_coefficients'], + 'product_types': reaction_forward['reactant_types'], + 'product_coefficients': reaction_forward['reactant_coefficients'], + 'default_charges': reaction_forward['default_charges'], + } + + if isinstance(method, espressomd.reaction_ensemble.ConstantpHEnsemble): + method.add_reaction(gamma=reaction_forward['gamma'], + reactant_types=reaction_forward['reactant_types'], + product_types=reaction_forward['product_types'], + default_charges=reaction_forward['default_charges']) + else: + method.add_reaction(**reaction_forward) + reaction_parameters = (reaction_forward, reaction_backward) + + # check getters and setters + self.assertAlmostEqual(method.kT, kT, delta=1e-10) + self.assertAlmostEqual( + method.exclusion_radius, + exclusion_radius, + delta=1e-10) + self.assertAlmostEqual( + method.get_volume(), + self.system.volume(), + delta=1e-10) + method.set_volume(volume=1.) + self.assertAlmostEqual(method.get_volume(), 1., delta=1e-10) + self.assertEqual(method.get_non_interacting_type(), 100) + method.set_non_interacting_type(type=9) + self.assertEqual(method.get_non_interacting_type(), 9) + if isinstance(method, espressomd.reaction_ensemble.ConstantpHEnsemble): + self.assertAlmostEqual(method.constant_pH, 10., delta=1e-10) + method.constant_pH = 8. + self.assertAlmostEqual(method.constant_pH, 8., delta=1e-10) + + # check constraints + method.set_wall_constraints_in_z_direction( + slab_start_z=0.1, slab_end_z=0.9) + offsets = method.get_wall_constraints_in_z_direction() + self.assertAlmostEqual(offsets[0], 0.1, delta=1e-10) + self.assertAlmostEqual(offsets[1], 0.9, delta=1e-10) + method.remove_constraint() + + # check status + status = method.get_status() + self.assertEqual(status['kT'], kT) + self.assertEqual(status['exclusion_radius'], exclusion_radius) + self.assertEqual(len(status['reactions']), 2) + for reaction_flat, params in zip( + status['reactions'], reaction_parameters): + for key in reaction_flat: + if key == 'gamma': + self.assertAlmostEqual( + reaction_flat[key], params[key], delta=1e-10) + else: + self.assertEqual(reaction_flat[key], params[key]) + + # check reactions + reactions = method.reactions + self.assertEqual(len(reactions), 2) + check_reaction_parameters(method.reactions, reaction_parameters) + + # check reactions after parameter change + new_gamma = 634. + reaction_forward['gamma'] = new_gamma + reaction_backward['gamma'] = 1. 
/ new_gamma + method.change_reaction_constant(reaction_id=0, gamma=new_gamma) + check_reaction_parameters(method.reactions, reaction_parameters) + status = method.get_status() + self.assertAlmostEqual( + status['reactions'][0]['gamma'], + reaction_forward['gamma'], + delta=1e-10) + self.assertAlmostEqual( + status['reactions'][1]['gamma'], + reaction_backward['gamma'], + delta=1e-10) + + # check particle deletion + p1, _, p3 = self.system.part.add( + pos=3 * [(0., 0., 0.)], type=[5, 2, 3]) + if isinstance(method, espressomd.reaction_ensemble.WidomInsertion): + potential_energy = method.calculate_particle_insertion_potential_energy( + reaction_id=0) + self.assertEqual(potential_energy, 0.) + method.delete_particle(p_id=p3.id) + self.assertEqual(len(self.system.part), 2) + method.delete_particle(p_id=p1.id) + self.assertEqual(len(self.system.part), 1) + self.system.part.clear() + + # check reaction deletion + method.delete_reaction(reaction_id=0) + self.assertEqual(len(method.reactions), 0) + + def test_interface(self): + # reaction ensemble + method = espressomd.reaction_ensemble.ReactionEnsemble( + kT=1.5, exclusion_radius=0.8, seed=12) + self.check_interface(method, kT=1.5, exclusion_radius=0.8, gamma=1.2) + + # constant pH ensemble + method = espressomd.reaction_ensemble.ConstantpHEnsemble( + kT=1.5, exclusion_radius=0.8, seed=12, constant_pH=10) + self.check_interface(method, kT=1.5, exclusion_radius=0.8, gamma=1.2) + + # Widom insertion + method = espressomd.reaction_ensemble.WidomInsertion(kT=1.6, seed=12) + self.check_interface(method, kT=1.6, exclusion_radius=0., gamma=1.) + + def test_exceptions(self): + single_reaction_params = { + 'gamma': 1., + 'reactant_types': [4], + 'reactant_coefficients': [1], + 'product_types': [2, 3], + 'product_coefficients': [1, 4], + } + reaction_params = { + 'default_charges': {2: 0, 3: 0, 4: 0}, + **single_reaction_params + } + widom = espressomd.reaction_ensemble.WidomInsertion(kT=1., seed=12) + method = espressomd.reaction_ensemble.ReactionEnsemble( + kT=1.5, exclusion_radius=0.8, seed=12) + method.add_reaction(**reaction_params) + widom.add_reaction(**reaction_params) + + # check invalid reactions + err_msg = 'number of types and coefficients have to match' + with self.assertRaisesRegex(ValueError, f'reactants: {err_msg}'): + method.add_reaction(**{**reaction_params, 'reactant_types': []}) + with self.assertRaisesRegex(ValueError, f'products: {err_msg}'): + method.add_reaction(**{**reaction_params, 'product_types': []}) + + # check charge conservation + err_msg = 'Reaction system is not charge neutral' + with self.assertRaisesRegex(ValueError, err_msg): + method.add_reaction(default_charges={2: 8, 3: 0, 4: -50}, + **single_reaction_params) + with self.assertRaisesRegex(ValueError, err_msg): + method.add_reaction(default_charges={2: 1, 3: 0, 4: 1 + 1e-10}, + **single_reaction_params) + + # check invalid reaction id exceptions + # (note: reactions id = 2 * reactions index) + self.assertEqual(len(method.reactions), 2) + for i in [-2, -1, 1, 2, 3]: + with self.assertRaisesRegex(IndexError, 'This reaction is not present'): + method.delete_reaction(reaction_id=i) + with self.assertRaisesRegex(IndexError, 'This reaction is not present'): + method.get_acceptance_rate_reaction(reaction_id=2 * i) + + # check constraint exceptions + set_cyl_constraint = method.set_cylindrical_constraint_in_z_direction + set_slab_constraint = method.set_wall_constraints_in_z_direction + get_slab_constraint = method.get_wall_constraints_in_z_direction + err_msg = "no slab 
constraint is currently active" + with self.assertRaisesRegex(RuntimeError, err_msg): + get_slab_constraint() + set_slab_constraint(slab_start_z=0.1, slab_end_z=0.9) + method.remove_constraint() + with self.assertRaisesRegex(RuntimeError, err_msg): + get_slab_constraint() + + # check invalid constraints + with self.assertRaisesRegex(ValueError, "center_x is outside the box"): + set_cyl_constraint(center_x=100., center_y=1., radius=1.) + with self.assertRaisesRegex(ValueError, "center_x is outside the box"): + set_cyl_constraint(center_x=-10., center_y=1., radius=1.) + with self.assertRaisesRegex(ValueError, "center_y is outside the box"): + set_cyl_constraint(center_y=100., center_x=1., radius=1.) + with self.assertRaisesRegex(ValueError, "center_y is outside the box"): + set_cyl_constraint(center_y=-10., center_x=1., radius=1.) + with self.assertRaisesRegex(ValueError, "radius is invalid"): + set_cyl_constraint(center_x=1., center_y=1., radius=-1.) + with self.assertRaisesRegex(ValueError, "slab_start_z is outside the box"): + set_slab_constraint(slab_start_z=100., slab_end_z=1.) + with self.assertRaisesRegex(ValueError, "slab_start_z is outside the box"): + set_slab_constraint(slab_start_z=-10., slab_end_z=1.) + with self.assertRaisesRegex(ValueError, "slab_end_z is outside the box"): + set_slab_constraint(slab_end_z=100., slab_start_z=1.) + with self.assertRaisesRegex(ValueError, "slab_end_z is outside the box"): + set_slab_constraint(slab_end_z=-10., slab_start_z=1.) + with self.assertRaisesRegex(ValueError, "slab_end_z must be >= slab_start_z"): + set_slab_constraint(slab_start_z=10., slab_end_z=1.) + + # check exceptions for missing particles + with self.assertRaisesRegex(RuntimeError, "Particle id is greater than the max seen particle id"): + method.delete_particle(p_id=0) + with self.assertRaisesRegex(RuntimeError, "Trying to remove some non-existing particles from the system via the inverse Widom scheme"): + widom.calculate_particle_insertion_potential_energy(reaction_id=0) + + # check other exceptions + with self.assertRaisesRegex(ValueError, "Invalid value for 'volume'"): + method.set_volume(volume=-10.) 
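+        # unknown methods, unknown keyword arguments and invalid parameter
+        # values in the constructors must also raise descriptive errors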
+ with self.assertRaisesRegex(RuntimeError, r"unknown method 'unknown\(\)'"): + method.call_method('unknown', x=1) + err_msg = r"Only the following keys can be given as keyword arguments: \[.+\], got \[.+\] \(unknown \['x'\]\)" + with self.assertRaisesRegex(ValueError, err_msg): + espressomd.reaction_ensemble.SingleReaction( + x=1, **single_reaction_params) + with self.assertRaisesRegex(ValueError, err_msg): + espressomd.reaction_ensemble.ReactionEnsemble( + kT=1., exclusion_radius=1., seed=12, x=1) + with self.assertRaisesRegex(ValueError, err_msg): + espressomd.reaction_ensemble.ConstantpHEnsemble( + kT=1., exclusion_radius=1., seed=12, x=1, constant_pH=2) + with self.assertRaisesRegex(ValueError, err_msg): + espressomd.reaction_ensemble.WidomInsertion( + kT=1., seed=12, x=1) + with self.assertRaisesRegex(ValueError, "Invalid value for 'kT'"): + espressomd.reaction_ensemble.ReactionEnsemble( + kT=-1., exclusion_radius=1., seed=12) + with self.assertRaisesRegex(ValueError, "Invalid value for 'exclusion_radius'"): + espressomd.reaction_ensemble.ReactionEnsemble( + kT=1., exclusion_radius=-1., seed=12) + + +if __name__ == "__main__": + ut.main() diff --git a/testsuite/python/domain_decomposition.py b/testsuite/python/regular_decomposition.py similarity index 97% rename from testsuite/python/domain_decomposition.py rename to testsuite/python/regular_decomposition.py index 741a79ed13a..55b2ee62f29 100644 --- a/testsuite/python/domain_decomposition.py +++ b/testsuite/python/regular_decomposition.py @@ -23,12 +23,12 @@ np.random.seed(42) -class DomainDecomposition(ut.TestCase): +class RegularDecomposition(ut.TestCase): system = espressomd.System(box_l=3 * [50.0]) original_node_grid = tuple(system.cell_system.node_grid) def setUp(self): - self.system.cell_system.set_domain_decomposition( + self.system.cell_system.set_regular_decomposition( use_verlet_lists=False) self.system.cell_system.node_grid = self.original_node_grid self.system.time_step = 1e-3 diff --git a/testsuite/python/test_checkpoint.py b/testsuite/python/test_checkpoint.py index 90588a8439f..5eefa05cc53 100644 --- a/testsuite/python/test_checkpoint.py +++ b/testsuite/python/test_checkpoint.py @@ -329,7 +329,9 @@ def test_bonded_inter(self): self.assertEqual( ibm_volcons_bond.params, {'softID': 15, 'kappaV': 0.01}) if 'DP3M.CPU' not in modes: - self.assertEqual(ibm_tribend_bond.params, {'kb': 2., 'theta0': 0.}) + self.assertEqual( + ibm_tribend_bond.params, + {'kb': 2., 'theta0': 0., 'refShape': 'Initial'}) self.assertEqual( ibm_triel_bond.params, {'k1': 1.1, 'k2': 1.2, 'maxDist': 1.6, 'elasticLaw': 'NeoHookean'}) diff --git a/testsuite/python/virtual_sites_relative.py b/testsuite/python/virtual_sites_relative.py index 9d0f2fc87a8..cefc2d2b86c 100644 --- a/testsuite/python/virtual_sites_relative.py +++ b/testsuite/python/virtual_sites_relative.py @@ -307,9 +307,9 @@ def test_lj(self): system.cell_system.skin = 0.4 system.cell_system.set_n_square(use_verlet_lists=True) self.run_test_lj() - system.cell_system.set_domain_decomposition(use_verlet_lists=True) + system.cell_system.set_regular_decomposition(use_verlet_lists=True) self.run_test_lj() - system.cell_system.set_domain_decomposition(use_verlet_lists=False) + system.cell_system.set_regular_decomposition(use_verlet_lists=False) self.run_test_lj() @utx.skipIfMissingFeatures("EXTERNAL_FORCES") diff --git a/testsuite/python/widom_insertion.py b/testsuite/python/widom_insertion.py index b2391d8558c..09fab8ee348 100644 --- a/testsuite/python/widom_insertion.py +++ 
b/testsuite/python/widom_insertion.py @@ -70,14 +70,13 @@ class WidomInsertionTest(ut.TestCase): system.cell_system.set_n_square() np.random.seed(69) # make reaction code fully deterministic system.cell_system.skin = 0.4 - volume = system.volume() Widom = espressomd.reaction_ensemble.WidomInsertion( kT=TEMPERATURE, seed=1) # Set the hidden particle type to the lowest possible number to speed # up the simulation - Widom.set_non_interacting_type(1) + Widom.set_non_interacting_type(type=1) def setUp(self): self.system.part.add(pos=0.5 * self.system.box_l, type=self.TYPE_HA) @@ -101,7 +100,7 @@ def test_widom_insertion(self): for _ in range(num_samples): # 0 for insertion reaction particle_insertion_potential_energy = self.Widom.calculate_particle_insertion_potential_energy( - 0) + reaction_id=0) particle_insertion_potential_energy_samples.append( particle_insertion_potential_energy)
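For context, the snippet below is a minimal, illustrative sketch (not part of the patch) of how the keyword-only calls exercised by the updated tests fit together; the API names are taken from the diff above, while the box length, particle types, charges and seed are placeholder values.

import espressomd
import espressomd.reaction_ensemble

# illustrative system; all numeric values are placeholders
system = espressomd.System(box_l=[10., 10., 10.])
system.cell_system.skin = 0.4
# one particle per reactant/product type so the insertion energy is defined
system.part.add(pos=3 * [(0., 0., 0.)], type=[5, 2, 3])

widom = espressomd.reaction_ensemble.WidomInsertion(kT=1., seed=42)
widom.add_reaction(gamma=1.,
                   reactant_types=[5], reactant_coefficients=[1],
                   product_types=[2, 3], product_coefficients=[1, 1],
                   default_charges={5: 0, 2: 0, 3: 0})
# both calls now take mandatory keyword arguments
widom.set_non_interacting_type(type=100)
energy = widom.calculate_particle_insertion_potential_energy(reaction_id=0)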