diff --git a/.gitlab/os.yml b/.gitlab/os.yml index 28c42b369..380b48312 100644 --- a/.gitlab/os.yml +++ b/.gitlab/os.yml @@ -4,7 +4,7 @@ .sys_config: variables: LLNL_SERVICE_USER: sphapp - UPSTREAM_DIR: /usr/WS2/wciuser/Spheral/spheral-spack-tpls + UPSTREAM_DIR: /usr/WS2/sduser/Spheral/spack_upstream/0.22 DISPLAY: ':0.0' .on_toss_3_x86: diff --git a/.gitlab/scripts.yml b/.gitlab/scripts.yml index 069bfaf6a..4b3612395 100644 --- a/.gitlab/scripts.yml +++ b/.gitlab/scripts.yml @@ -12,7 +12,6 @@ - cd $CI_BUILD_DIR - echo $SPEC - - ml load python/3 - $BUILD_ALLOC ./$SCRIPT_DIR/gitlab/build_and_install.py --spec="$SPEC" --extra-cmake-args="$EXTRA_CMAKE_ARGS" .build_and_test: diff --git a/.uberenv_config.json b/.uberenv_config.json index 475e3f325..96cc40f8d 100644 --- a/.uberenv_config.json +++ b/.uberenv_config.json @@ -3,7 +3,7 @@ "package_version" : "develop", "package_source_dir" : "../../..", "spack_url" : "https://github.com/spack/spack", - "spack_commit" : "5e0d2107348eed6cbe6deca43a30f5b06c5e40af", + "spack_commit" : "5fe93fee1eec46a0750bd340198bffcb92ff9eec", "spack_configs_path" : "scripts/spack/configs", "spack_packages_path" : "scripts/spack/packages" } diff --git a/Dockerfile b/Dockerfile index ed9b467ea..41a840fac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -31,7 +31,7 @@ ARG HOST_CONFIG=docker-$SPEC ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update -y RUN apt-get upgrade -y -RUN apt-get install -y build-essential git gfortran mpich autotools-dev autoconf sqlite pkg-config uuid gettext cmake libncurses-dev libgdbm-dev libffi-dev libssl-dev libexpat-dev libreadline-dev liblapack-dev libbz2-dev locales python python3 unzip libtool wget curl tk-dev +RUN apt-get install -y build-essential git gfortran mpich autotools-dev autoconf sqlite pkg-config uuid gettext cmake libncurses-dev libgdbm-dev libffi-dev libssl-dev libexpat-dev libreadline-dev libbz2-dev locales python python3 unzip libtool wget curl tk-dev # Setup system locale for pip package encoding/decoding RUN locale-gen en_US.UTF-8 diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index f00db1442..231bbd7d9 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,3 +1,34 @@ +Version v2024.06.0 -- Release date 2024-06-27 +============================================== + * Important Notes: + * External users of the code will need to supply config files for tpl-manager to find system libraries correctly. Steps to do this are detailed in the external user build guide. + +Notable changes include: + + * New features / API changes: + * Added MFV hydro from Hopkins 2015 with extension for ALE options. + * Adding optional user specified smoothing scale method for SPH, FSISPH, and CRKSPH. + + * Build changes / improvements: + * PYBind11 libraries no longer depend on the structure of the PYB11 source directory. + * CMake interface for adding PYBind11 target libraries is modified to more closely match how C++ libraries are created. + * Multiple Spheral Python modules / CMake targets can be specified for a single directory. + * KernelIntegrator and FieldList directories are divided into 2 modules / targets. + * tpl-manager.py will no longer use generic x86_64 configs for non LC systems. Users will be required to supply their own configs for pointing spack at external packages. + * Spack version is increased from 0.19 to 0.22. + * Spack upstream is updated. + * Removed the python 3 module load for the Gitlab CI to fix an issue with pkg-config changing. + * Zlib target and TPL cmake file is removed. + * PYB11Generator repo is updated. 
+    * Spack config and package files inside Spheral are updated to accommodate Spack 0.22.
+    * Package recipes for py-numpy-stl, py-pillow, py-pipreqs, td, and tk are removed.
+    * Versions for python dependencies in the Spheral spack recipe are fixed and updated (in some cases).
+
+  * Bug Fixes / improvements:
+    * Corrected an erroneous VERIFY in the P-alpha porosity constructor (with Fields of porosity and sound speed) that forced runs to stop even with correct input parameters.
+    * Fixed a bug in the standard ASPH hydros (ASPH, SolidASPH, and RZ varieties) that gave incorrect results. FSI and CRK models with ASPH smoothing scales were OK, but standard
+      SPH using ASPH smoothing scales was simply incorrect for non-unit aspect ratio H's. Also added ATS tests to help catch such errors going forward.
+
 Version v2024.01.1 -- Release date 2024-02-17
 ==============================================
  * Important Notes:
@@ -9,15 +40,22 @@ Notable changes include:
    * Adding an optional second-stage problem start-up hook to the Physics package interface: Physics::initializeProblemStartupDependencies. The idea is to keep basic sizing of arrays and such in the first stage (Physics::initializeProblemStartup), while this new hook is used for updating any initial Physics state (and therefore provides a State and StateDerivatives object).
-
+  * DEM
+    * new field list to track max particle overlap
+    * user can optionally turn off fast time stepping
+
   * Build changes / improvements:
-    *
+    * Improved the target export functionality.

   * Bug Fixes / improvements:
     * Fixed bug with ConstantBoundary in the presence of porosity with the new porosity models introduced in v2024.01.00.
     * Updating header lists for including Spheral modules in external projects.
     * Adding effective viscous pressure back to FSISPH.
     * Initial volumes for damage models were incorrectly not taking into account pore space when computing failure statistics for seeding flaws. Fixed.
+ * DEM + * fixed bug in solid boundary unique indices that causes particle sticking + * fixed bug in solid boundary update policies + * fixed solid boundary restartability for moving bcs Version v2024.01.00 -- Release date 2024-01-19 ============================================== diff --git a/cmake/CMakeDefinitions.cmake b/cmake/CMakeDefinitions.cmake index 7c892941a..d8b2f2340 100644 --- a/cmake/CMakeDefinitions.cmake +++ b/cmake/CMakeDefinitions.cmake @@ -73,5 +73,5 @@ if (ENABLE_TIMER) endif() if (ENABLE_MPI) - add_definitions(-DUSE_MPI=1) + add_definitions(-DUSE_MPI=1) endif() diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake deleted file mode 100644 index e7697380b..000000000 --- a/cmake/FindSphinx.cmake +++ /dev/null @@ -1,19 +0,0 @@ -if (NOT SPHINX_EXECUTABLE) - - find_program(SPHINX_EXECUTABLE NAMES sphinx-build - HINTS - $ENV{SPHINX_DIR} - PATH_SUFFIXES bin - DOC "Sphinx documentation generator" - ) - - include(FindPackageHandleStandardArgs) - - find_package_handle_standard_args(Sphinx DEFAULT_MSG - SPHINX_EXECUTABLE - ) - -endif() - -message("-- Sphinx executable: ${SPHINX_EXECUTABLE}") -mark_as_advanced(SPHINX_EXECUTABLE) diff --git a/cmake/InstallTPLs.cmake b/cmake/InstallTPLs.cmake index f556243ff..f36fb0f16 100644 --- a/cmake/InstallTPLs.cmake +++ b/cmake/InstallTPLs.cmake @@ -19,6 +19,7 @@ if (NOT ENABLE_CXXONLY) # Find the appropriate Python find_package(Python3 COMPONENTS Interpreter Development) set(PYTHON_EXE ${Python3_EXECUTABLE}) + set(SPHERAL_SITE_PACKAGES_PATH "lib/python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}/site-packages" ) list(APPEND SPHERAL_BLT_DEPENDS Python3::Python) # Set the PYB11Generator path @@ -88,7 +89,7 @@ foreach(_comp ${AXOM_COMPONENTS_ENABLED}) endforeach() # TPLs that must be imported -list(APPEND SPHERAL_EXTERN_LIBS zlib boost eigen qhull silo hdf5 polytope) +list(APPEND SPHERAL_EXTERN_LIBS boost eigen qhull silo hdf5 polytope) blt_list_append( TO SPHERAL_EXTERN_LIBS ELEMENTS aneos IF ENABLE_ANEOS) blt_list_append( TO SPHERAL_EXTERN_LIBS ELEMENTS opensubdiv IF ENABLE_OPENSUBDIV) @@ -107,6 +108,7 @@ endforeach() if (EXISTS ${EXTERNAL_SPHERAL_TPL_CMAKE}) include(${EXTERNAL_SPHERAL_TPL_CMAKE}) endif() + # Copied from serac, needed to bypass generator expression issue during export set(_props) if( ${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.13.0" ) diff --git a/cmake/SetupSpheral.cmake b/cmake/SetupSpheral.cmake index 4d1ef5768..f1ef324f9 100644 --- a/cmake/SetupSpheral.cmake +++ b/cmake/SetupSpheral.cmake @@ -5,7 +5,6 @@ include(ExternalProject) #------------------------------------------------------------------------------- set(CMAKE_CXX_STANDARD 17) set(CMAKE_EXPORT_COMPILE_COMMANDS On) -set(CMAKE_EXPORT_COMPILE_COMMANDS On) if (NOT SPHERAL_CMAKE_MODULE_PATH) set(SPHERAL_CMAKE_MODULE_PATH "${SPHERAL_ROOT_DIR}/cmake") @@ -27,6 +26,7 @@ set(Python3_EXECUTABLE ${python_DIR}/bin/python3) set(ENABLE_MPI ON CACHE BOOL "") set(ENABLE_OPENMP ON CACHE BOOL "") +set(BLT_DOCS_TARGET_NAME "blt_docs" CACHE STRING "") if(NOT SPHERAL_BLT_DIR) set (SPHERAL_BLT_REL_DIR "${SPHERAL_ROOT_DIR}/cmake/blt" CACHE PATH "") @@ -83,8 +83,6 @@ if(ENABLE_CUDA) list(APPEND SPHERAL_CXX_DEPENDS cuda) endif() -option(BOOST_HEADER_ONLY "only use the header only components of Boost" OFF) - #-------------------------------------------------------------------------------# # Set a default build type if none was specified #-------------------------------------------------------------------------------# diff --git a/cmake/SpheralVersion.cmake 
b/cmake/SpheralVersion.cmake index 0e62b972d..d894dfec7 100644 --- a/cmake/SpheralVersion.cmake +++ b/cmake/SpheralVersion.cmake @@ -1 +1 @@ -set(SPHERAL_VERSION 2024.01.1) +set(SPHERAL_VERSION 2024.06.0) diff --git a/cmake/blt b/cmake/blt index 29eecef76..b7314a86e 160000 --- a/cmake/blt +++ b/cmake/blt @@ -1 +1 @@ -Subproject commit 29eecef7652728ec37d9fe0b7d7f41e3184ac7eb +Subproject commit b7314a86e9fc78baf2682ad55509bbc8acd4bce6 diff --git a/cmake/spheral/InstantiateCXX.cmake b/cmake/spheral/InstantiateCXX.cmake index 5f16244e6..9788af1d2 100644 --- a/cmake/spheral/InstantiateCXX.cmake +++ b/cmake/spheral/InstantiateCXX.cmake @@ -40,7 +40,7 @@ function(instantiate _inst_var _source_var) # Uses BLT's python for instantiations to work when building CXX_ONLY as well as with python add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_inst_file} DEPENDS ${_inst_py} - COMMAND ${PYTHON_EXECUTABLE} ${SPHERAL_ROOT_DIR}/src/helpers/InstantiationGenerator.py ${_inst_py} ${_inst_file} ${_dim} + COMMAND ${Python3_EXECUTABLE} ${SPHERAL_ROOT_DIR}/src/helpers/InstantiationGenerator.py ${_inst_py} ${_inst_file} ${_dim} BYPRODUCTS ${_inst_file} COMMENT "Generating instantiation ${_inst_file}..." ) diff --git a/cmake/spheral/SpheralAddLibs.cmake b/cmake/spheral/SpheralAddLibs.cmake index becce9d52..c4e0e20e2 100644 --- a/cmake/spheral/SpheralAddLibs.cmake +++ b/cmake/spheral/SpheralAddLibs.cmake @@ -123,26 +123,35 @@ function(spheral_add_cxx_library package_name _cxx_obj_list) set_target_properties(Spheral_${package_name} PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") endfunction() +#---------------------------------------------------------------------------------------- +# spheral_add_pybin11_library_package +#---------------------------------------------------------------------------------------- +# ------------------------------------------- +# VARIABLES THAT NEED TO BE PREVIOUSLY DEFINED +# ------------------------------------------- +# SPHERAL_BLT_DEPENDS : REQUIRED : List of external dependencies +# EXTRA_PYB11_SPHERAL_ENV_VARS : OPTIONAL : Additional directories containing python filed, used by LLNLSpheral +# _headers : OPTIONAL : List of necessary headers to include +# _sources : OPTIONAL : List of necessary source files to include +# SPHERAL_SUBMOD_DEPENDS : REQUIRED : List of submodule dependencies +# ---------------------- +# INPUT-OUTPUT VARIABLES +# ---------------------- +# package_name : REQUIRED : Desired package name +# module_list_name : REQUIRED : The NAME of the global variable that is the list of +# Spheral python modules (not the list itself) +# INCLUDES : OPTIONAL : Target specific includes +# DEPENDS : OPTIONAL : Target specific dependencies +# SOURCE : OPTIONAL : Target specific sources +# ----------------------- +# OUTPUT VARIABLES TO USE - Made available implicitly after function call +# ----------------------- +# Spheral : Target for a given Spheral python module +# Spheral_src : Target for the PYB11Generated source code for a given Spheral module +# : List of Spheral python modules, appended with current module name +#---------------------------------------------------------------------------------------- -#----------------------------------------------------------------------------------- -# spheral_add_pybind11_library -# - Generate the python friendly Spheral package lib -# -# Args: -# package_name : *name* of spheral package to make into a library -# INCLUDES : optional, any additional include paths -# SOURCES : optional, any additional source files to 
compile into the library -# DEPENDS : optional, extra dependencies -# -# Variables that must be set before calling spheral_add_obj_library: -# spheral_depends -# - List of targets the library depends on -# SPHERAL_BLT_DEPENDS -# - List of blt/libs the library depends on -# -#----------------------------------------------------------------------------------- - -function(spheral_add_pybind11_library package_name) +function(spheral_add_pybind11_library package_name module_list_name) # Define our arguments set(options ) @@ -205,8 +214,6 @@ function(spheral_add_pybind11_library package_name) # Get the TPL dependencies get_property(SPHERAL_BLT_DEPENDS GLOBAL PROPERTY SPHERAL_BLT_DEPENDS) - get_property(spheral_tpl_includes GLOBAL PROPERTY spheral_tpl_includes) - get_property(spheral_tpl_libraries GLOBAL PROPERTY spheral_tpl_libraries) # If building shared libraries, use the SPHERAL_OBJ_LIBS global list # Note, LLNLSpheral has appended any local targets to this list as well if(ENABLE_DEV_BUILD) @@ -222,8 +229,7 @@ function(spheral_add_pybind11_library package_name) SOURCE ${package_name}_PYB11.py DEPENDS ${SPHERAL_BLT_DEPENDS} ${SPHERAL_CXX_DEPENDS} ${EXTRA_CXX_DEPENDS} ${SPHERAL_DEPENDS} PYTHONPATH ${PYTHON_ENV_STR} - INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} ${SPHERAL_INCLUDES} ${${package_name}_INCLUDES} ${spheral_tpl_includes} ${PYBIND11_ROOT_DIR}/include - LINKS ${spheral_tpl_libraries} + INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} ${${package_name}_INCLUDES} ${PYBIND11_ROOT_DIR}/include COMPILE_OPTIONS ${SPHERAL_PYB11_TARGET_FLAGS} USE_BLT ON EXTRA_SOURCE ${${package_name}_SOURCES} @@ -233,9 +239,9 @@ function(spheral_add_pybind11_library package_name) target_compile_options(${MODULE_NAME} PRIVATE ${SPHERAL_PYB11_TARGET_FLAGS}) install(TARGETS ${MODULE_NAME} - DESTINATION Spheral + DESTINATION ${SPHERAL_SITE_PACKAGES_PATH}/Spheral ) - + set_property(GLOBAL APPEND PROPERTY ${module_list_name} ${package_name}) # Set the r-path of the C++ lib such that it is independent of the build dir when installed set_target_properties(${MODULE_NAME} PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") diff --git a/cmake/spheral/SpheralInstallPythonFiles.cmake b/cmake/spheral/SpheralInstallPythonFiles.cmake index 78f9081b8..01f90b749 100644 --- a/cmake/spheral/SpheralInstallPythonFiles.cmake +++ b/cmake/spheral/SpheralInstallPythonFiles.cmake @@ -7,13 +7,14 @@ # Note, if ENABLE_CXXONLY is set, this function does nothing #----------------------------------------------------------------------------------- + function(spheral_install_python_files) if (NOT ENABLE_CXXONLY) install(FILES ${ARGV} - DESTINATION Spheral) + DESTINATION ${SPHERAL_SITE_PACKAGES_PATH}/Spheral) install(CODE "execute_process( \ - COMMAND ${PYTHON_EXE} -m compileall Spheral \ + COMMAND ${PYTHON_EXE} -m compileall DESTINATION ${SPHERAL_SITE_PACKAGES_PATH}/Spheral \ WORKING_DIRECTORY ${CMAKE_INSTALL_PREFIX})") endif() diff --git a/cmake/spheral_cxx-config.cmake.in b/cmake/spheral_cxx-config.cmake.in index d982d2945..28da5f0da 100644 --- a/cmake/spheral_cxx-config.cmake.in +++ b/cmake/spheral_cxx-config.cmake.in @@ -1,17 +1,47 @@ @PACKAGE_INIT@ -if(NOT axom_DIR) - set(axom_DIR "@axom_DIR@" CACHE PATH "") -endif() -set(SPHERAL_BLT_DIR "@SPHERAL_BLT_DIR@") -set(SPHERAL_CXX_INSTALL_PREFIX "@CMAKE_INSTALL_PREFIX@") -set(SPHERAL_CXX_INCLUDE_DIRS "${SPHERAL_CXX_INSTALL_PREFIX}/include") -if(NOT TARGET axom) - find_package(axom REQUIRED QUIET NO_DEFAULT_PATH PATHS ${axom_DIR} ${axom_DIR}/lib ${axom_DIR}/lib/cmake) -endif() 
-include("${SPHERAL_CXX_INSTALL_PREFIX}/lib/cmake/spheral_cxx-targets.cmake") +if(NOT SPHERAL_FOUND) + #---------------------------------------------------------------------------- + # Set user configuration options and features + #---------------------------------------------------------------------------- + + # Language features + set(SPHERAL_ENABLE_MPI "@ENABLE_MPI@") + set(SPHERAL_ENABLE_OPENMP "@ENABLE_OPENMP@") + set(SPHERAL_ENABLE_CUDA "@ENABLE_CUDA@") + if(NOT axom_DIR) + set(axom_DIR "@axom_DIR@" CACHE PATH "") + endif() -set_property(TARGET Spheral_CXX - APPEND PROPERTY - INTERFACE_INCLUDE_DIRECTORIES ${SPHERAL_CXX_INCLUDE_DIRS}) + set(SPHERAL_CXX_INSTALL_PREFIX "@CMAKE_INSTALL_PREFIX@") + set(SPHERAL_CXX_INCLUDE_DIRS "${SPHERAL_CXX_INSTALL_PREFIX}/include") + if(NOT @ENABLE_CXXONLY@ AND NOT TARGET Python3) + set(python_DIR "@python_DIR@") + set(Python_EXECUTABLE ${python_DIR}/bin/python3) + set(Python3_EXECUTABLE ${python_DIR}/bin/python3) + find_package(Python3 COMPONENTS Interpreter Development) + set(PYTHON_EXE ${Python3_EXECUTABLE}) + endif() + + if(NOT TARGET axom) + find_package(axom REQUIRED QUIET NO_DEFAULT_PATH PATHS ${axom_DIR} ${axom_DIR}/lib ${axom_DIR}/lib/cmake) + endif() + if(SPHERALC_STANDALONE) + set(CMAKE_C_COMPILER "@CMAKE_C_COMPILER@" CACHE PATH "Spheral C compiler path") + set(CMAKE_CXX_COMPILER "@CMAKE_CXX_COMPILER@" CACHE PATH "Spheral C++ compiler path") + set(CMAKE_Fortran_COMPILER "@CMAKE_Fortran_COMPILER@" CACHE PATH "Spheral C++ compiler path") + set(ENABLE_MPI @ENABLE_MPI@ CACHE BOOL "") + if(SPHERAL_ENABLE_MPI) + set(MPI_C_COMPILER "@MPI_C_COMPILER@" CACHE PATH "") + set(MPI_CXX_COMPILER "@MPI_CXX_COMPILER@" CACHE PATH "") + set(MPI_Fortran_COMPILER "@MPI_Fortran_COMPILER@" CACHE PATH "") + endif() + endif() + include("${SPHERAL_CXX_INSTALL_PREFIX}/lib/cmake/spheral_cxx-targets.cmake") + + set_property(TARGET Spheral_CXX + APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${SPHERAL_CXX_INCLUDE_DIRS}) + set(SPHERAL_FOUND TRUE) +endif() diff --git a/cmake/tpl/zlib.cmake b/cmake/tpl/zlib.cmake deleted file mode 100644 index 9d79e5656..000000000 --- a/cmake/tpl/zlib.cmake +++ /dev/null @@ -1,5 +0,0 @@ -set(${lib_name}_libs libz.so) - -if(ENABLE_STATIC_TPL) - string(REPLACE ".so" ".a;" ${lib_name}_libs ${${lib_name}_libs}) -endif() diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 997b86da2..e7988c0ec 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -1,11 +1,4 @@ -#find_package(Sphinx) -set(SPHINX_FOUND true) -if (NOT DEFINED SPHINX_EXEC) - set(SPHINX_EXEC "${sphinx_DIR}/bin/sphinx-build" CACHE STRING "Path to sphinx documentation generator") -endif() -message("-- Sphinx ${SPHINX_EXEC}") - -if(ENABLE_DOCS AND NOT SPHINX_EXEC STREQUAL "SPHINX_EXEC-NOTFOUND") +if(ENABLE_DOCS) if(NOT DEFINED SPHINX_THEME) set(SPHINX_THEME sphinx_rtd_theme) endif() @@ -28,8 +21,8 @@ if(ENABLE_DOCS AND NOT SPHINX_EXEC STREQUAL "SPHINX_EXEC-NOTFOUND") "${BINARY_BUILD_DIR}/conf.py" @ONLY) - add_custom_target(Spheral_docs ALL - ${CMAKE_COMMAND} -E env PYTHONPATH=${SPACK_PYTHONPATH} ${PYTHON_EXE} -m sphinx + add_custom_target(docs ALL + env PYTHONPATH=${SPACK_PYTHONPATH} ${PYTHON_EXE} -m sphinx -q -b html -c "${BINARY_BUILD_DIR}" -d "${SPHINX_CACHE_DIR}" diff --git a/docs/build_guide/external/index.rst b/docs/build_guide/external/index.rst index 46640c394..ca2d65dc2 100644 --- a/docs/build_guide/external/index.rst +++ b/docs/build_guide/external/index.rst @@ -9,7 +9,7 @@ This guide is designed to help external users build and install Spheral on non L 
:caption: External Building Guide:

   quickstart.rst
-   ubuntu_update.rst
+   system_packages.rst
   cloning.rst
   updating.rst
   tpls.rst
diff --git a/docs/build_guide/external/system_packages.rst b/docs/build_guide/external/system_packages.rst
new file mode 100644
index 000000000..b812def28
--- /dev/null
+++ b/docs/build_guide/external/system_packages.rst
@@ -0,0 +1,12 @@
+.. _required system packages:
+
+Required System Packages
+########################
+
+This guide assumes the use of an Ubuntu 20.04 system using ``apt`` as the package manager. For other operating systems please install the corresponding packages as seen below.
+
+::
+
+  sudo apt update
+  sudo apt upgrade
+  sudo apt install build-essential git gfortran mpich autotools-dev autoconf sqlite pkg-config uuid gettext cmake libncurses-dev libgdbm-dev libffi-dev libssl-dev libexpat-dev libreadline-dev liblapack-dev libbz2-dev locales python python3 unzip libtool wget curl tk-dev
diff --git a/docs/build_guide/external/ubuntu_update.rst b/docs/build_guide/external/ubuntu_update.rst
deleted file mode 100644
index af4ab14f2..000000000
--- a/docs/build_guide/external/ubuntu_update.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Updating Ubuntu
-###############
-
-This guide assumes the use of an Ubuntu 20.04 system and using ``apt`` as the package manager. For other distrobutions please install the corresponding packages.
-
-.. note::
-  Future steps (especially those detailed in :ref:`ex_tpl`) are assuming packages are installed under ``/usr/bin``, ``/usr/lib`` etc.
-
-::
-
-  sudo apt update
-  sudo apt upgrade
-  sudo apt install build-essential git gfortran mpich autotools-dev autoconf sqlite pkg-config uuid gettext cmake libncurses-dev libgdbm-dev libffi-dev libssl-dev libexpat-dev libreadline-dev liblapack-dev libbz2-dev locales python python3 unzip libtool wget curl tk-dev
-
diff --git a/docs/build_guide/include/quickstart.rst.inc b/docs/build_guide/include/quickstart.rst.inc
index 44b1b1fba..e4b6e01b6 100644
--- a/docs/build_guide/include/quickstart.rst.inc
+++ b/docs/build_guide/include/quickstart.rst.inc
@@ -12,9 +12,6 @@ Update and install necessary package dependencies.
   sudo apt upgrade
   sudo apt install build-essential git gfortran mpich autotools-dev autoconf sqlite pkg-config uuid gettext cmake libncurses-dev libgdbm-dev libffi-dev libssl-dev libexpat-dev libreadline-dev liblapack-dev libbz2-dev locales python python3 unzip libtool wget curl tk-dev

-.. warning::
-   For alternative Linux distros your mileage may vary, ensure you are installing compatible packages to the ones listed above.
-
 [ex_update_sys-section-end]
 ..

@@ -47,6 +44,12 @@ Build our TPL dependencies from source with the Spheral tpl-management tool (``t
 .. note::
    This command will generate a ``.cmake`` file with the naming convention ``-``. The following commands will refer to this format as ```` for generalization across operating systems and architectures. You will need to substitute the correct format in the following commands.

+.. warning::
+   For operating systems other than Ubuntu 20.04 you should get an error to the effect of:
+   ``[ERROR: invalid spack config dir: //scripts/spack/configs/ ]``
+   You will **need** to follow the steps outlined in :ref:`ERROR: invalid spack config dir` before proceeding with this quickstart guide.
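
As a rough sketch of what those steps look like in practice (``<your-os>`` stands in for whatever directory name ``tpl-manager.py`` reports in the error message, and the version-lookup commands are only examples)::

  # Copy the generic configs; the directory name must match the one in the error message.
  cp -r scripts/spack/configs/generic scripts/spack/configs/<your-os>
  # Look up installed versions before editing <your-os>/packages.yaml, e.g.:
  mpiexec --version                                 # reports the MPI version
  ls -lha /usr/lib/x86_64-linux-gnu/libreadline.so  # symlink shows the library version
  # Most package-manager installs keep prefix: /usr in packages.yaml.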
+ + [ex_tpl-section-end] [lc_tpl-section-start] diff --git a/docs/build_guide/include/tpls.rst.inc b/docs/build_guide/include/tpls.rst.inc index 909b507ab..3bf5d6de0 100644 --- a/docs/build_guide/include/tpls.rst.inc +++ b/docs/build_guide/include/tpls.rst.inc @@ -66,6 +66,38 @@ Above we are telling ``tpl-manager`` to build our TPLs with the ``gcc`` that is .. note:: Spheral minimally requires a C++14 compliant compiler. +ERROR: invalid spack config dir +=============================== + +If you are trying to run ``tpl-manager.py`` on an operating system other than Ubuntu20.04, you will +see an error to the effect of: +``[ERROR: invalid spack config dir: //scripts/spack/configs/ ]`` + +We define configuration files for Ubuntu20.04, as well as our common LLNL operating systems. +You will need to create a set of files for your own system. + +The configuration files tell spack where the packages installed in :ref:`Required System Packages` +are located and what version they are. We have provided a ``generic`` set of config files to +help in setting this up for you. + +#. Copy the directory ``scripts/spack/configs/generic`` to ``scripts/spack/config/`` (you want to match the name of the directory to the one ``tpl-manager.py`` expects to find). + + +#. For each package within the ``packages.py`` file of your new folder edit the version number to be the same as + what is installed on your system. There are a number of ways to retrieve versions for a given package: + + * Most version numbers should be searchable through your package manager, there are however, some system libraries that may not be managed by your package manager. + + * If the package has an executable, often you can run with some form of ``-V`` or ``--version``. e.g. for mpich:``mpiexec --version`` will report the MPI version. + + * For packages that only provide libraries, often the system library will be symlinked to one with the version as the extension. e.g. ``ls -lha /usr/lib/x86_64_gnu-linux/libreadlines.so`` will show it is symlinked to ``libreadlines.so.8.1``. + + +#. Each package requires the ``prefix:`` of the installation be provided. In most cases ``/usr`` is sufficient. Typically packages installed with a package manager will place files in: ``/usr/bin``, ``/usr/share``, ``/usr/lib``, ``/usr/lib64``, sometimes ``/usr/lib/x86_64.../``. Here the common prefix is ``/usr``. + + * If you are building Spheral on a system where you don't have permissions to run package manager and install to ``/usr``, then you might have installed the system packages somewhere else. In that case, replace the ``prefix:`` path for those given packages as necessary. + + [ex_running_tpl_manager-end] [lc_running_tpl_manager-start] diff --git a/docs/conf.py b/docs/conf.py index c6b2d7ed4..ac0e4fea6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -26,9 +26,9 @@ author = 'J. Michael Owen' # The short X.Y version -version = '2024.01.1' +version = '2024.06.0' # The full version, including alpha/beta/rc tags -release = '2024.01.1' +release = '2024.06.0' # -- General configuration --------------------------------------------------- diff --git a/docs/conf.py.in b/docs/conf.py.in index 5c6b055e1..312a41083 100644 --- a/docs/conf.py.in +++ b/docs/conf.py.in @@ -26,9 +26,9 @@ copyright = '2012, LLNS' author = 'J. 
Michael Owen'

 # The short X.Y version
-version = '2024.01.1'
+version = '2024.06.0'
 # The full version, including alpha/beta/rc tags
-release = '2024.01.1'
+release = '2024.06.0'

 # -- General configuration ---------------------------------------------------
diff --git a/docs/developer/dev/continuous_deployment.rst b/docs/developer/dev/continuous_deployment.rst
index dca78ad2d..623d6cbaf 100644
--- a/docs/developer/dev/continuous_deployment.rst
+++ b/docs/developer/dev/continuous_deployment.rst
@@ -9,11 +9,10 @@ to their environment.
 On LC systems we maintain:

   * ``Spheral/risky``
+  * ``Spheral/2024.06.0``
   * ``Spheral/2024.01.1``
   * ``Spheral/2023.06.0``
   * ``Spheral/2023.03.1``
-  * ``Spheral/2023.03.0`` (deprecated as of TOSS4 upgrade)
-  * ``Spheral/2022.06.0`` (deprecated as of TOSS4 upgrade)

 Spheral/risky
 =============
diff --git a/docs/developer/dev/docker_dev_env.rst b/docs/developer/dev/docker_dev_env.rst
new file mode 100644
index 000000000..3618b4125
--- /dev/null
+++ b/docs/developer/dev/docker_dev_env.rst
@@ -0,0 +1,75 @@
+*******************************************
+Docker Development Environment
+*******************************************
+
+Spheral builds an up-to-date docker container for every merge-request
+on ``develop``. Developers can use this container to do development tasks
+on local machines.
+
+===========================
+Creating a Dev Environment
+===========================
+
+We will use ``docker dev create`` with our spheral docker image and a
+local repository. This will allow us to skip setting up a linux system with
+external packages, give us pre-built TPLs, and allow us to edit a cloned
+repository from our local machine's IDE/text editor::
+
+  > rm /compose-dev.yaml
+  > docker dev create --base-image ghcr.io/llnl/spheral --name --path -o
+
+.. note::
+   You need to have **Docker Desktop**, **VSCode** and the **VSCode Dev Environment Extension** installed for this to work. You do not need to use VSCode to access the container, but the extension seems to do some of the lifting for us when setting up the volume to our local repo.
+
+**Output** ::
+
+  spheral-recursing_darwin    <---- Name of dev environment
+  Creating Dev Environment "spheral-recursing_darwin"
+  populating volume from /Users/davis291/Projects/spheral
+  Creating Dev Environment "spheral-recursing_darwin"
+  detecting language
+  Detecting main repo language...
+  building compose stack
+  building compose stack
+  starting compose stack
+  starting compose stack
+  Network spheral-recursing_darwin_default  Creating
+  Network spheral-recursing_darwin_default  Created
+  Container spheral-recursing_darwin-app-1  Creating
+  Container spheral-recursing_darwin-app-1  Created
+  Container spheral-recursing_darwin-app-1  Starting
+  Container spheral-recursing_darwin-app-1  Started    <---- Name of running container to connect to.
+  Dev Environment "spheral-recursing_darwin" (5bd37219d27eb68a77ce6fd8fee05a533a52017d8dcc72430867e2471e428e58) is running!%
+
+
+=============================
+Connecting to a Dev Container
+=============================
+
+Once the container has been started you can connect directly through the terminal
+with the **Container** name (**NOT** the **Dev Environment** name)::
+
+  > docker exec -it spheral-recursing_darwin-app-1 /bin/bash
+  root@671dab5d0b00:/home/spheral/workspace/build_docker-gcc/install#
+
+This drops you into the install location of the ``spheral@develop`` build from
+github; this is a fully installed version of the latest ``develop`` spheral.
+
+.. tip::
+   VSCode & Docker Desktop:
+     * Open **Docker Desktop** and navigate to the **Dev Environment** tab.
+     * Find the container name and select **OPEN IN VSCODE**.
+
+
+=============================
+Development Work
+=============================
+
+Your local Spheral repo is mounted from your local filesystem. You can develop directly from your
+IDE or text editor of choice. Then you can compile and run from within the container itself.
+
+- The local Spheral repository will be mounted in the container at ``/com.docker.devenvironments.code/``.
+
+- There already exists a full build and install of Spheral at ``develop`` in ``/home/spheral/workspace/build_docker-gcc/install``.
+
+- An updated host config file can be found at ``/home/spheral/workspace/docker-gcc.cmake``.
diff --git a/docs/developer/development_docs.rst b/docs/developer/development_docs.rst
index 76aa90c6c..13401fb9f 100644
--- a/docs/developer/development_docs.rst
+++ b/docs/developer/development_docs.rst
@@ -9,6 +9,7 @@ Welcome to Spheral's developer documentation. This documentation is a work in pr

    dev/submodules.rst
    dev/diagnostic_tools.rst
+   dev/docker_dev_env.rst
    dev/release_process.rst
    dev/continuous_deployment.rst
diff --git a/extern/PYB11Generator b/extern/PYB11Generator
index 71608127d..df24db6ca 160000
--- a/extern/PYB11Generator
+++ b/extern/PYB11Generator
@@ -1 +1 @@
-Subproject commit 71608127d276e9135c2c81d65bd0822adb006e91
+Subproject commit df24db6ca94e4e5e7531fe7e3c0177d0af79e557
diff --git a/scripts/devtools/tpl-manager.py b/scripts/devtools/tpl-manager.py
index 4ee543025..fa658f067 100755
--- a/scripts/devtools/tpl-manager.py
+++ b/scripts/devtools/tpl-manager.py
@@ -11,7 +11,7 @@ project_dir=os.path.abspath(os.path.join(os.path.realpath(__file__), "../../../"))

 default_spheral_spack_dir=os.path.join(os.getcwd(), "../spheral-spack-tpls")
-default_upstream_dir="/usr/WS2/wciuser/Spheral/spheral-spack-tpls/spack/opt/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_p/"
+default_upstream_dir="/usr/WS2/sduser/Spheral/spack_upstream/0.22/spack/opt/spack/__spack_path_placeholder__/__spack_path_placeholder__/__spack_path_p"

 uberenv_path = os.path.join(project_dir, "scripts/devtools/uberenv/uberenv.py")
 uberenv_project_json = os.path.join(os.getcwd(), ".uberenv_config.json")
@@ -59,24 +59,24 @@ def parse_args():
     parser.add_argument('--no-spec', action='store_true',
                         help='Skip output of the dependency graph.')

+    parser.add_argument('--skip-init', action='store_true',
+                        help='Skip setting up and configuring Spack.')
+
     return parser.parse_args()


 # Helper function for executing commands stolen from uberenv
-def sexe(cmd,ret_output=False,echo=False):
+def sexe(cmd,ret_output=False,echo=True):
     """ Helper for executing shell commands. """
     if echo:
-        print("[exe: {0}]".format(cmd))
+        print("[exe: {0}]".format(cmd))
+    p = subprocess.run(cmd, shell=True,
+                       capture_output=ret_output,
+                       check=True, text=True)
     if ret_output:
-        p = subprocess.Popen(cmd,
-                             shell=True,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.STDOUT)
-        out = p.communicate()[0]
-        out = out.decode('utf8')
-        return p.returncode,out
-    else:
-        return subprocess.call(cmd,shell=True)
+        if echo:
+            print(p.stdout)
+        return p.stdout


 # Parse the json formatted spec list...
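
For context, the surrounding refactor splits ``tpl-manager.py`` into a Spack-setup phase (``build_spack``, defined in the hunks that follow) and a TPL-build phase (``build_deps``); the ``--skip-init`` flag added above lets later runs bypass the setup. A hedged usage sketch, run from the repository root with an illustrative spec value::

  # First run: set up Spack, pick the OS config dir, then build the TPLs.
  python3 scripts/devtools/tpl-manager.py --spec gcc
  # Subsequent runs: reuse the already-configured Spack instance.
  python3 scripts/devtools/tpl-manager.py --spec gcc --skip-init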
@@ -89,30 +89,13 @@ def parse_spec_list(file_path): #------------------------------ # Dependencies #------------------------------ -def build_deps(args): +def build_spack(args): print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") - print("~~~~~ Building Dependencies") + print("~~~~~ Configuring Spack") print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") print("") print("{0}".format(project_dir)) - - # Figure out what specs this script is building TPLs for. - spec_list=[] - if args.spec_list: - spec_list = parse_spec_list(args.spec_list) - elif args.spec: - spec_list.append(args.spec) - else: - print("ERROR: Please define --spec or --spec-list, aborting...") - sys.exit(1) - for s in spec_list: - print("** SPEC : {0}".format(s)) - - spack_config_dir_opt="" - if "SYS_TYPE" not in os.environ.keys(): - spack_config_dir_opt="--spack-config-dir={0}".format(os.path.join(project_dir, "scripts/spack/configs/x86_64")) - spack_upstream_opt="" if os.path.isdir(args.upstream_dir) and not args.no_upstream: spack_upstream_opt="--upstream {0}".format(args.upstream_dir) @@ -124,17 +107,61 @@ def build_deps(args): # We use uberenv to set up our spack instance with our respective package.yaml files # config.yaml and custom spack packages recipes. print("** Running uberenv...") + prefix_opt="--prefix=" + args.spheral_spack_dir uberenv_project_json_opt="--project-json={0}".format(uberenv_project_json) print("** Spheral Spack Dir : {0}".format(args.spheral_spack_dir)) - sexe("python3 {0} --setup-only {1} {2} {3} {4} {5}".format(uberenv_path, prefix_opt, uberenv_project_json_opt, spack_config_dir_opt, spack_upstream_opt, uberenv_spack_url_opt), echo=True) # We just want to use the spac instance directly to generate our TPLs, we don't want # to have the spack instance take over our environment. os.environ["SPACK_DISABLE_LOCAL_CONFIG"] = "1" spack_cmd=os.path.join(args.spheral_spack_dir, "spack/bin/spack") + spheral_config_dir="scripts/spack/configs/" + spack_config_dir_opt="" + if "SYS_TYPE" not in os.environ.keys(): + # We need to install spack without any configuration files so we can use + # spack arch to determine the OS of the system and later to use spack find + # for generating external package files on external systems. + sexe("python3 {0} --setup-only {1} {2} {3} {4}".format(uberenv_path, prefix_opt, uberenv_project_json_opt, spack_upstream_opt, uberenv_spack_url_opt)) + + spack_arch_os = sexe("{0} arch -o".format(spack_cmd), ret_output=True, echo=False).strip() + print("INFO : Detected Operating System :{0}".format(spack_arch_os)) + + spheral_config_dir += spack_arch_os + + spack_config_dir_opt="--spack-config-dir={0}".format(os.path.join(project_dir, spheral_config_dir)) + else: + spheral_config_dir += os.environ["SYS_TYPE"] + + + # Setup spack w/ Uberenv and the appropriate external package/compiler configs. + sexe("python3 {0} --setup-only {1} {2} {3} {4} {5}".format(uberenv_path, prefix_opt, uberenv_project_json_opt, spack_config_dir_opt, spack_upstream_opt, uberenv_spack_url_opt)) + + # Uberenv doesn't copy the concretizer.yaml options... 
+ if os.path.exists(spheral_config_dir+"/concretizer.yaml"): + sexe("cp {0}/concretizer.yaml {1}".format(spheral_config_dir, os.path.join(args.spheral_spack_dir, "spack/etc/spack/defaults"))) + +def build_deps(args): + print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") + print("~~~~~ Building Dependencies") + print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") + print("") + print("{0}".format(project_dir)) + # Figure out what specs this script is building TPLs for. + spec_list=[] + if args.spec_list: + spec_list = parse_spec_list(args.spec_list) + elif args.spec: + spec_list.append(args.spec) + else: + print("ERROR: Please define --spec or --spec-list, aborting...") + sys.exit(1) + for s in spec_list: + print("** SPEC : {0}".format(s)) + spack_cmd=os.path.join(args.spheral_spack_dir, "spack/bin/spack") + # Optionally add a parallel job number for spack builds if args.spack_jobs: spack_cmd += " --jobs={0}".format(args.spack_jobs) @@ -152,17 +179,22 @@ def build_deps(args): print("** Building TPL's and generating host-config for {0}%{1} ...".format(package_name,s)) os.environ["SPEC"] = s os.environ["LC_ALL"] = "en_US.UTF-8" + if not args.no_spec: - if sexe("{0} spec --fresh -I {1}@develop%{2}".format(spack_cmd, package_name, s), echo=True) : sys.exit(1) - if sexe("{0} dev-build --fresh --quiet --deprecated -u initconfig {2}@develop%{3} 2>&1 | tee -a \"dev-build-{3}-out.txt\"".format(spack_cmd, os.getcwd(), package_name, s), echo=True) : sys.exit(1) + sexe("{0} spec --fresh -IL {1}@develop%{2} 2>&1 | tee -a \"spec-info-{2}-out.txt\"".format(spack_cmd, package_name, s)) + + # Install only the dependencies for Spheral and create CMake configure file + sexe("{0} dev-build -q --fresh -u initconfig {1}@develop%{2} 2>&1 | tee -a \"tpl-build-{2}-out.txt\"".format(spack_cmd, package_name, s)) if not args.no_clean: - sexe("rm dev-build-* spack-build-* spack-configure-args.txt") + sexe("rm -f spec-info-* tpl-build-* spack-build-* spack-configure-args.txt") #------------------------------------------------------------------------------ def main(): args = parse_args() + if (not args.skip_init): + build_spack(args) build_deps(args) diff --git a/scripts/spack/configs/blueos_3_ppc64le_ib/packages.yaml b/scripts/spack/configs/blueos_3_ppc64le_ib/packages.yaml index 6f5cf30ad..60ab23b00 100644 --- a/scripts/spack/configs/blueos_3_ppc64le_ib/packages.yaml +++ b/scripts/spack/configs/blueos_3_ppc64le_ib/packages.yaml @@ -2,15 +2,42 @@ packages: all: target: [ppc64le] compiler: [gcc, clang] + providers: + blas: [netlib-lapack] + lapack: [netlib-lapack] + mpi: [spectrum-mpi] + zlib-api: [zlib] cmake: version: [3.20.2] buildable: false externals: - spec: cmake@3.20.2 prefix: /usr/tce/packages/cmake/cmake-3.20.2 + gmake: + version: [4.2.1] + buildable: false + externals: + - spec: gmake@4.2.1 + prefix: /usr + git: + version: [2.29.1] + buildable: false + externals: + - spec: git@2.29.1+tcltk + prefix: /usr/tce + perl: + version: [5.26.3] + buildable: false + externals: + - spec: perl@5.26.3 + prefix: /usr cuda: - version: [11.1.0, 11.0.2, 10.1.243, 10.1.168, 9.2.148, 8.0] + version: + - 11.1.0 + - 11.0.2 + - 10.1.243 + - 10.1.168 buildable: false externals: - spec: cuda@11.1.0~allow-unsupported-compilers @@ -21,11 +48,7 @@ packages: prefix: /usr/tce/packages/cuda/cuda-10.1.243 - spec: cuda@10.1.168+allow-unsupported-compilers prefix: /usr/tce/packages/cuda/cuda-10.1.168 - - spec: cuda@9.2.148~allow-unsupported-compilers - prefix: /usr/tce/packages/cuda/cuda-9.2.148 - - spec: cuda@8.0~allow-unsupported-compilers 
- prefix: /usr/tce/packages/cuda/cuda-8.0 - + spectrum-mpi: externals: - spec: spectrum-mpi@10.3.1.03rtm0%pgi@19.10 @@ -130,15 +153,15 @@ packages: prefix: /usr buildable: false tcl: - externals: - - spec: tcl@8.5.19 - prefix: /usr - buildable: false + externals: + - spec: tcl@8.5.19 + prefix: /usr + buildable: false tk: - externals: - - spec: tk@8.5.19 - prefix: /usr - buildable: false + externals: + - spec: tk@8.5.19 + prefix: /usr + buildable: false fontconfig: externals: - spec: fontconfig@2.13.1 diff --git a/scripts/spack/configs/config.yaml b/scripts/spack/configs/config.yaml index e1947f386..9c3264b74 100644 --- a/scripts/spack/configs/config.yaml +++ b/scripts/spack/configs/config.yaml @@ -14,55 +14,83 @@ # ~/.spack/config.yaml # ------------------------------------------------------------------------- config: - concretizer: "clingo" # This is the path to the root of the Spack install tree. # You can use $spack here to refer to the root of the spack instance. install_tree: root: $spack/opt/spack projections: - all: "${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}" + all: "{architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash}" + # install_tree can include an optional padded length (int or boolean) + # default is False (do not pad) + # if padded_length is True, Spack will pad as close to the system max path + # length as possible + # if padded_length is an integer, Spack will pad to that many characters, + # assuming it is higher than the length of the install_tree root. padded_length: 128 -# Locations where templates should be found + + # Locations where templates should be found template_dirs: - $spack/templates - # Locations where different types of modules should be installed. - #module_roots: - # tcl: $spack/share/spack/modules - # lmod: $spack/share/spack/lmod - + # Directory where licenses should be located + license_dir: $spack/etc/spack/licenses # Temporary locations Spack can try to use for builds. # - # Spack will use the first one it finds that exists and is writable. - # You can use $tempdir to refer to the system default temp directory - # (as returned by tempfile.gettempdir()). + # Recommended options are given below. + # + # Builds can be faster in temporary directories on some (e.g., HPC) systems. + # Specifying `$tempdir` will ensure use of the default temporary directory + # (i.e., ``$TMP` or ``$TMPDIR``). + # + # Another option that prevents conflicts and potential permission issues is + # to specify `$user_cache_path/stage`, which ensures each user builds in their + # home directory. + # + # A more traditional path uses the value of `$spack/var/spack/stage`, which + # builds directly inside Spack's instance without staging them in a + # temporary space. Problems with specifying a path inside a Spack instance + # are that it precludes its use as a system package and its ability to be + # pip installable. + # + # In Spack environment files, chaining onto existing system Spack + # installations, the $env variable can be used to download, cache and build + # into user-writable paths that are relative to the currently active + # environment. # - # A value of $spack/var/spack/stage indicates that Spack should run - # builds directly inside its install directory without staging them in - # temporary space. + # In any case, if the username is not already in the path, Spack will append + # the value of `$user` in an attempt to avoid potential conflicts between + # users in shared temporary spaces. 
# - # The build stage can be purged with `spack purge --stage`. + # The build stage can be purged with `spack clean --stage` and + # `spack clean -a`, so it is important that the specified directory uniquely + # identifies Spack staging to avoid accidentally wiping out non-Spack work. build_stage: - # skipping tempdir b/c running mpi tests fails with local fs - # - $tempdir - $spack/../builds + # Directory in which to run tests and store test results. + # Tests will be stored in directories named by date/time and package + # name/hash. + test_stage: $user_cache_path/test - # Cache directory already downloaded source tarballs and archived - # repositories. This can be purged with `spack purge --downloads`. + # Cache directory for already downloaded source tarballs and archived + # repositories. This can be purged with `spack clean --downloads`. source_cache: $spack/var/spack/cache + ## Directory where spack managed environments are created and stored + # environments_root: $spack/var/spack/environments + + # Cache directory for miscellaneous files, like the package index. - # This can be purged with `spack purge --misc-cache` - misc_cache: .spack/misccache + # This can be purged with `spack clean --misc-cache` + misc_cache: $spack/misccache # Timeout in seconds used for downloading sources etc. This only applies # to the connection phase and can be increased for slow connections or - # servers. 0 means no timeout. Default(10). + # servers. 0 means no timeout. connect_timeout: 60 @@ -71,18 +99,139 @@ config: verify_ssl: true + # Suppress gpg warnings from binary package verification + # Only suppresses warnings, gpg failure will still fail the install + # Potential rationale to set True: users have already explicitly trusted the + # gpg key they are using, and may not want to see repeated warnings that it + # is self-signed or something of the sort. + suppress_gpg_warnings: false + + + # If set to true, Spack will attempt to build any compiler on the spec + # that is not already available. If set to False, Spack will only use + # compilers already configured in compilers.yaml + install_missing_compilers: false + + # If set to true, Spack will always check checksums after downloading # archives. If false, Spack skips the checksum step. checksum: true + # If set to true, Spack will fetch deprecated versions without warning. + # If false, Spack will raise an error when trying to install a deprecated version. + deprecated: true + + # If set to true, `spack install` and friends will NOT clean # potentially harmful variables from the build environment. Use wisely. dirty: false - # The default number of jobs to use when running `make` in parallel. - # If set to 4, for example, `spack install` will run `make -j4`. - # If not set, all available cores are used by default. - # for uberenv, limit build_jobs to 8 + # The language the build environment will use. This will produce English + # compiler messages by default, so the log parser can highlight errors. + # If set to C, it will use English (see man locale). + # If set to the empty string (''), it will use the language from the + # user's environment. + build_language: C + + + # When set to true, concurrent instances of Spack will use locks to + # avoid modifying the install tree, database file, etc. If false, Spack + # will disable all locking, but you must NOT run concurrent instances + # of Spack. 
For filesystems that don't support locking, you should set + # this to false and run one Spack at a time, but otherwise we recommend + # enabling locks. + locks: true + + # The default url fetch method to use. + # If set to 'curl', Spack will require curl on the user's system + # If set to 'urllib', Spack will use python built-in libs to fetch + url_fetch_method: urllib + + # The maximum number of jobs to use for the build system (e.g. `make`), when + # the -j flag is not given on the command line. Defaults to 16 when not set. + # Note that the maximum number of jobs is limited by the number of cores + # available, taking thread affinity into account when supported. For instance: + # - With `build_jobs: 16` and 4 cores available `spack install` will run `make -j4` + # - With `build_jobs: 16` and 32 cores available `spack install` will run `make -j16` + # - With `build_jobs: 2` and 4 cores available `spack install -j6` will run `make -j6` build_jobs: 54 + + + # If set to true, Spack will use ccache to cache C compiles. + ccache: false + + + # The concretization algorithm to use in Spack. Options are: + # + # 'clingo': Uses a logic solver under the hood to solve DAGs with full + # backtracking and optimization for user preferences. Spack will + # try to bootstrap the logic solver, if not already available. + # + # 'original': Spack's original greedy, fixed-point concretizer. This + # algorithm can make decisions too early and will not backtrack + # sufficiently for many specs. This will soon be deprecated in + # favor of clingo. + # + # See `concretizer.yaml` for more settings you can fine-tune when + # using clingo. + concretizer: clingo + + + # How long to wait to lock the Spack installation database. This lock is used + # when Spack needs to manage its own package metadata and all operations are + # expected to complete within the default time limit. The timeout should + # therefore generally be left untouched. + db_lock_timeout: 60 + + + # How long to wait when attempting to modify a package (e.g. to install it). + # This value should typically be 'null' (never time out) unless the Spack + # instance only ever has a single user at a time, and only if the user + # anticipates that a significant delay indicates that the lock attempt will + # never succeed. + package_lock_timeout: null + + + # Control how shared libraries are located at runtime on Linux. See the + # the Spack documentation for details. + shared_linking: + # Spack automatically embeds runtime search paths in ELF binaries for their + # dependencies. Their type can either be "rpath" or "runpath". For glibc, rpath is + # inherited and has precedence over LD_LIBRARY_PATH; runpath is not inherited + # and of lower precedence. DO NOT MIX these within the same install tree. + type: rpath + + + # (Experimental) Embed absolute paths of dependent libraries directly in ELF + # binaries to avoid runtime search. This can improve startup time of + # executables with many dependencies, in particular on slow filesystems. + bind: false + + + # Set to 'false' to allow installation on filesystems that doesn't allow setgid bit + # manipulation by unprivileged user (e.g. AFS) + allow_sgid: true + + # Whether to show status information during building and installing packages. + # This gives information about Spack's current progress as well as the current + # and total number of packages. Information is shown both in the terminal + # title and inline. 
+ install_status: true + + # Number of seconds a buildcache's index.json is cached locally before probing + # for updates, within a single Spack invocation. Defaults to 10 minutes. + binary_index_ttl: 600 + + flags: + # Whether to keep -Werror flags active in package builds. + keep_werror: 'none' + + # A mapping of aliases that can be used to define new commands. For instance, + # `sp: spec -I` will define a new command `sp` that will execute `spec` with + # the `-I` argument. Aliases cannot override existing commands. + aliases: + concretise: concretize + containerise: containerize + rm: remove diff --git a/scripts/spack/configs/generic/concretizer.yaml b/scripts/spack/configs/generic/concretizer.yaml new file mode 100644 index 000000000..4e6375b59 --- /dev/null +++ b/scripts/spack/configs/generic/concretizer.yaml @@ -0,0 +1,36 @@ +# ------------------------------------------------------------------------- +# This is the default spack configuration file. +# +# Settings here are versioned with Spack and are intended to provide +# sensible defaults out of the box. Spack maintainers should edit this +# file to keep it current. +# +# Users can override these settings by editing +# `$SPACK_ROOT/etc/spack/concretizer.yaml`, `~/.spack/concretizer.yaml`, +# or by adding a `concretizer:` section to an environment. +# ------------------------------------------------------------------------- +concretizer: + # Whether to consider installed packages or packages from buildcaches when + # concretizing specs. If `true`, we'll try to use as many installs/binaries + # as possible, rather than building. If `false`, we'll always give you a fresh + # concretization. + reuse: true + # Options that tune which targets are considered for concretization. The + # concretization process is very sensitive to the number targets, and the time + # needed to reach a solution increases noticeably with the number of targets + # considered. + targets: + # Determine whether we want to target specific or generic microarchitectures. + # An example of the first kind might be for instance "skylake" or "bulldozer", + # while generic microarchitectures are for instance "aarch64" or "x86_64_v4". + granularity: generic + # If "false" allow targets that are incompatible with the current host (for + # instance concretize with target "icelake" while running on "haswell"). + # If "true" only allow targets that are compatible with the host. + host_compatible: true + # When "true" concretize root specs of environments together, so that each unique + # package in an environment corresponds to one concrete spec. This ensures + # environments can always be activated. When "false" perform concretization separately + # on each root spec, allowing different versions and variants of the same package in + # an environment. 
+ unify: true diff --git a/scripts/spack/configs/x86_64/packages.yaml b/scripts/spack/configs/generic/packages.yaml similarity index 84% rename from scripts/spack/configs/x86_64/packages.yaml rename to scripts/spack/configs/generic/packages.yaml index 3a3096541..3c10f211c 100644 --- a/scripts/spack/configs/x86_64/packages.yaml +++ b/scripts/spack/configs/generic/packages.yaml @@ -4,17 +4,6 @@ packages: - spec: mpich@3.3.2 prefix: /usr buildable: false - #openmpi: - # externals: - # - spec: openmpi@4.1.2 - # prefix: /usr/ - # buildable: false - - #cmake: - # externals: - # - spec: cmake@3.16.3 - # prefix: /usr - # buildable: false # ------ SYSTEM LIBS ------- ncurses: @@ -112,13 +101,8 @@ packages: - spec: fontconfig@2.13.1 prefix: /usr buildable: false - # netlib-lapack: - # externals: - # - spec: netlib-lapack@3.9.0 - # prefix: /usr - # buildable: false - liblapack64-dev: + netlib-lapack: externals: - - spec: liblapack64-dev@3.9.0 + - spec: netlib-lapack@3.9.0 prefix: /usr buildable: false diff --git a/scripts/spack/configs/toss_4_x86_64_ib/packages.yaml b/scripts/spack/configs/toss_4_x86_64_ib/packages.yaml index 17d02a6c3..d843aae97 100644 --- a/scripts/spack/configs/toss_4_x86_64_ib/packages.yaml +++ b/scripts/spack/configs/toss_4_x86_64_ib/packages.yaml @@ -4,18 +4,46 @@ packages: # us to run on broadwell as well target: [ivybridge] compiler: [gcc, clang] + providers: + blas: [openblas] + lapack: [netlib-lapack] + mpi: [mvapich2] + pkgconfig: [pkg-config] + pil: [py-pillow] + zlib-api: [zlib] cmake: version: [3.23.1] buildable: false externals: - spec: cmake@3.23.1 prefix: /usr/tce/packages/cmake/cmake-3.23.1 + gmake: + version: [4.2.1] + buildable: false + externals: + - spec: gmake@4.2.1 + prefix: /usr + git: + version: [2.29.1] + buildable: false + externals: + - spec: git@2.29.1+tcltk + prefix: /usr/tce + perl: + version: [5.26.3] + buildable: false + externals: + - spec: perl@5.26.3 + prefix: /usr mvapich2: externals: - spec: mvapich2@2.3.6%gcc@10.3.1~cuda~debug~regcache~wrapperrpath ch3_rank_bits=32 file_systems=lustre,nfs,ufs process_managers=slurm prefix: /usr/tce/packages/mvapich2/mvapich2-2.3.6-gcc-10.3.1 + - spec: mvapich2@2.3.6%clang@14.0.6~alloca~cuda~debug+regcache+wrapperrpath ch3_rank_bits=32 + fabrics=mrail file_systems=auto process_managers=slurm + prefix: /usr/tce/packages/mvapich2/mvapich2-2.3.6-clang-14.0.6 buildable: false # ------ SYSTEM LIBS ------- @@ -128,3 +156,8 @@ packages: externals: - spec: netlib-lapack@3.9.0 prefix: /usr + openblas: + buildable: false + externals: + - prefix: /usr + spec: openblas@0.3.15 diff --git a/scripts/spack/configs/ubuntu20.04/concretizer.yaml b/scripts/spack/configs/ubuntu20.04/concretizer.yaml new file mode 100644 index 000000000..4e6375b59 --- /dev/null +++ b/scripts/spack/configs/ubuntu20.04/concretizer.yaml @@ -0,0 +1,36 @@ +# ------------------------------------------------------------------------- +# This is the default spack configuration file. +# +# Settings here are versioned with Spack and are intended to provide +# sensible defaults out of the box. Spack maintainers should edit this +# file to keep it current. +# +# Users can override these settings by editing +# `$SPACK_ROOT/etc/spack/concretizer.yaml`, `~/.spack/concretizer.yaml`, +# or by adding a `concretizer:` section to an environment. +# ------------------------------------------------------------------------- +concretizer: + # Whether to consider installed packages or packages from buildcaches when + # concretizing specs. 
If `true`, we'll try to use as many installs/binaries + # as possible, rather than building. If `false`, we'll always give you a fresh + # concretization. + reuse: true + # Options that tune which targets are considered for concretization. The + # concretization process is very sensitive to the number targets, and the time + # needed to reach a solution increases noticeably with the number of targets + # considered. + targets: + # Determine whether we want to target specific or generic microarchitectures. + # An example of the first kind might be for instance "skylake" or "bulldozer", + # while generic microarchitectures are for instance "aarch64" or "x86_64_v4". + granularity: generic + # If "false" allow targets that are incompatible with the current host (for + # instance concretize with target "icelake" while running on "haswell"). + # If "true" only allow targets that are compatible with the host. + host_compatible: true + # When "true" concretize root specs of environments together, so that each unique + # package in an environment corresponds to one concrete spec. This ensures + # environments can always be activated. When "false" perform concretization separately + # on each root spec, allowing different versions and variants of the same package in + # an environment. + unify: true diff --git a/scripts/spack/configs/ubuntu20.04/packages.yaml b/scripts/spack/configs/ubuntu20.04/packages.yaml new file mode 100644 index 000000000..a5e240854 --- /dev/null +++ b/scripts/spack/configs/ubuntu20.04/packages.yaml @@ -0,0 +1,113 @@ +packages: + mpich: + externals: + - spec: mpich@3.3.2 + prefix: /usr + buildable: false + all: + providers: + mpi: [mpich] + blas: [netlib-lapack] + lapack: [netlib-lapack] +# ------ SYSTEM LIBS ------- +# FIXME: Currently allowing spack to build cmake and git + ncurses: + externals: + - spec: ncurses@6.2 + prefix: /usr + buildable: false + perl: + buildable: false + externals: + - spec: perl@5.30.0 + prefix: /usr + readline: + externals: + - spec: readline@8.0 + prefix: /usr + buildable: false + autoconf: + externals: + - spec: autoconf@2.69 + prefix: /usr + buildable: false + automake: + externals: + - spec: automake@1.16.1 + prefix: /usr + buildable: false + libtool: + externals: + - spec: libtool@2.4.6 + prefix: /usr + buildable: false + bzip2: + externals: + - spec: bzip2@1.0.8 + prefix: /usr + buildable: false + expat: + externals: + - spec: expat@2.2.9 + prefix: /usr + buildable: false + gdbm: + externals: + - spec: gdbm@1.18.1 + prefix: /usr + buildable: false + gettext: + externals: + - spec: gettext@0.19.8.1 + prefix: /usr + buildable: false + libffi: + externals: + - spec: libffi@3.3 + prefix: /usr + buildable: false + openssl: + externals: + - spec: openssl@1.1.1 + prefix: /usr + buildable: false + ossp-uuid: + externals: + - spec: ossp-uuid@1.6.2 + prefix: /usr + buildable: false + sqlite: + externals: + - spec: sqlite@2.8.17 + prefix: /usr + buildable: false + pkg-config: + externals: + - spec: pkg-config@0.29.1 + prefix: /usr + buildable: false + tar: + externals: + - spec: tar@1.30 + prefix: /usr + buildable: false + elfutils: + externals: + - spec: elfutils@0.176 + prefix: /usr + buildable: false + tcl: + externals: + - spec: tcl@8.6.9 + prefix: /usr + buildable: false + tk: + externals: + - spec: tk@8.6.9 + prefix: /usr + buildable: false + fontconfig: + externals: + - spec: fontconfig@2.13.1 + prefix: /usr + buildable: false diff --git a/scripts/spack/packages/caliper/package.py b/scripts/spack/packages/caliper/package.py index 
6a23f71d1..48aa10dd3 100644 --- a/scripts/spack/packages/caliper/package.py +++ b/scripts/spack/packages/caliper/package.py @@ -1,4 +1,4 @@ -# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other +# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,8 +6,6 @@ import os import sys -from llnl.util import tty - from spack.package import * @@ -20,32 +18,68 @@ class Caliper(CMakePackage, CudaPackage, ROCmPackage): homepage = "https://github.com/LLNL/Caliper" git = "https://github.com/LLNL/Caliper.git" - url = "https://github.com/LLNL/Caliper/archive/v2.8.0.tar.gz" + url = "https://github.com/LLNL/Caliper/archive/v2.11.0.tar.gz" tags = ["e4s", "radiuss"] - maintainers = ["daboehme"] + maintainers("daboehme") test_requires_compiler = True + license("BSD-3-Clause") + version("master", branch="master") + version("2.11.0", sha256="b86b733cbb73495d5f3fe06e6a9885ec77365c8aa9195e7654581180adc2217c") + version("2.10.0", sha256="14c4fb5edd5e67808d581523b4f8f05ace8549698c0e90d84b53171a77f58565") + version("2.9.1", sha256="4771d630de505eff9227e0ec498d0da33ae6f9c34df23cb201b56181b8759e9e") + version("2.9.0", sha256="507ea74be64a2dfd111b292c24c4f55f459257528ba51a5242313fa50978371f") version("2.8.0", sha256="17807b364b5ac4b05997ead41bd173e773f9a26ff573ff2fe61e0e70eab496e4") - version("2.7.0", sha256="b3bf290ec2692284c6b4f54cc0c507b5700c536571d3e1a66e56626618024b2b") - version("2.6.0", sha256="6efcd3e4845cc9a6169e0d934840766b12182c6d09aa3ceca4ae776e23b6360f") - version("2.5.0", sha256="d553e60697d61c53de369b9ca464eb30710bda90fba9671201543b64eeac943c") - version("2.4.0", tag="v2.4.0") - version("2.3.0", tag="v2.3.0") - version("2.2.0", tag="v2.2.0") - version("2.1.1", tag="v2.1.1") - version("2.0.1", tag="v2.0.1") - version("1.9.1", tag="v1.9.1") - version("1.9.0", tag="v1.9.0") - version("1.8.0", tag="v1.8.0") - version("1.7.0", tag="v1.7.0") + version( + "2.7.0", + sha256="b3bf290ec2692284c6b4f54cc0c507b5700c536571d3e1a66e56626618024b2b", + deprecated=True, + ) + version( + "2.6.0", + sha256="6efcd3e4845cc9a6169e0d934840766b12182c6d09aa3ceca4ae776e23b6360f", + deprecated=True, + ) + version( + "2.5.0", + sha256="d553e60697d61c53de369b9ca464eb30710bda90fba9671201543b64eeac943c", + deprecated=True, + ) + version( + "2.4.0", tag="v2.4.0", commit="30577b4b8beae104b2b35ed487fec52590a99b3d", deprecated=True + ) + version( + "2.3.0", tag="v2.3.0", commit="9fd89bb0120750d1f9dfe37bd963e24e478a2a20", deprecated=True + ) + version( + "2.2.0", tag="v2.2.0", commit="c408e9b3642c7aa80eff37b0826d819c57e7bc04", deprecated=True + ) + version( + "2.1.1", tag="v2.1.1", commit="0593b0e01c1d8d3e50c990399cc0fee403485599", deprecated=True + ) + version( + "2.0.1", tag="v2.0.1", commit="4d7ff46381c53a461e62edd949e2d9dea9db7b08", deprecated=True + ) + version( + "1.9.1", tag="v1.9.1", commit="cfc1defbbee20b50dd3e3477badd09a92b1df970", deprecated=True + ) + version( + "1.9.0", tag="v1.9.0", commit="8356e747349b285aa621c5b74e71559f0babc4a1", deprecated=True + ) + version( + "1.8.0", tag="v1.8.0", commit="117c1ef596b617dc71407b8b67eebef094a654f8", deprecated=True + ) + version( + "1.7.0", tag="v1.7.0", commit="898277c93d884d4e7ca1ffcf3bbea81d22364f26", deprecated=True + ) is_linux = sys.platform.startswith("linux") variant("shared", default=True, description="Build shared libraries") variant("adiak", default=True, description="Enable Adiak support") - variant("mpi", 
default=True, description="Enable MPI wrappers") + variant("mpi", default=True, description="Enable MPI support") # libunwind has some issues on Mac variant( "libunwind", default=sys.platform != "darwin", description="Enable stack unwind support" @@ -59,18 +93,23 @@ class Caliper(CMakePackage, CudaPackage, ROCmPackage): variant("sampler", default=is_linux, description="Enable sampling support on Linux") variant("sosflow", default=False, description="Enable SOSflow support") variant("fortran", default=False, description="Enable Fortran support") - variant("pic", default=True, description="Produce position-independent code (for shared libs)") + variant("variorum", default=False, description="Enable Variorum support") + variant("kokkos", default=True, when="@2.3.0:", description="Enable Kokkos profiling support") + variant("tests", default=False, description="Enable tests") + variant("pic", default=True, description="Turn on -fPIC") - depends_on("adiak@0.1:0", when="@2.2: +adiak") + depends_on("adiak@0.1:0", when="@2.2:2.10 +adiak") + depends_on("adiak@0.4:0", when="@2.11: +adiak") depends_on("papi@5.3:5", when="@:2.2 +papi") - depends_on("papi@5.3:6", when="@2.3: +papi") + depends_on("papi@5.3:", when="@2.3: +papi") depends_on("libpfm4@4.8:4", when="+libpfm") depends_on("mpi", when="+mpi") depends_on("unwind@1.2:1", when="+libunwind") depends_on("elfutils", when="+libdw") + depends_on("variorum", when="+variorum") depends_on("sosflow@spack", when="@1.0:1+sosflow") @@ -78,24 +117,27 @@ class Caliper(CMakePackage, CudaPackage, ROCmPackage): depends_on("python", type="build") # sosflow support not yet in 2.0 - conflicts("+sosflow", "@2.0.0:2.8") + conflicts("+sosflow", "@2.0.0:2.11") conflicts("+adiak", "@:2.1") conflicts("+libdw", "@:2.4") conflicts("+rocm", "@:2.7") conflicts("+rocm+cuda") patch("for_aarch64.patch", when="target=aarch64:") + patch( + "sampler-service-missing-libunwind-include-dir.patch", + when="@2.9.0:2.9.1 +libunwind +sampler", + ) def setup_build_environment(self, env): if '+pic' in self.spec: env.append_flags('CFLAGS', self.compiler.cc_pic_flag) env.append_flags('CXXFLAGS', self.compiler.cxx_pic_flag) - + def cmake_args(self): spec = self.spec args = [ - ("-DPYTHON_EXECUTABLE=%s" % spec["python"].command.path), "-DBUILD_TESTING=Off", "-DBUILD_DOCS=Off", self.define_from_variant("BUILD_SHARED_LIBS", "shared"), @@ -112,7 +154,9 @@ def cmake_args(self): self.define_from_variant("WITH_NVTX", "cuda"), self.define_from_variant("WITH_ROCTRACER", "rocm"), self.define_from_variant("WITH_ROCTX", "rocm"), - self.define_from_variant("WITH_PIC", "pic"), + self.define_from_variant("WITH_VARIORUM", "variorum"), + self.define_from_variant("WITH_KOKKOS", "kokkos"), + self.define_from_variant("WITH_PIC", "pic") ] if "+papi" in spec: @@ -123,6 +167,8 @@ def cmake_args(self): args.append("-DLIBPFM_INSTALL=%s" % spec["libpfm4"].prefix) if "+sosflow" in spec: args.append("-DSOS_PREFIX=%s" % spec["sosflow"].prefix) + if "+variorum" in spec: + args.append("-DVARIORUM_PREFIX=%s" % spec["variorum"].prefix) # -DWITH_CALLPATH was renamed -DWITH_LIBUNWIND in 2.5 callpath_flag = "LIBUNWIND" if spec.satisfies("@2.5:") else "CALLPATH" @@ -143,6 +189,7 @@ def cmake_args(self): args.append("-DCUPTI_PREFIX=%s" % spec["cuda"].prefix) if "+rocm" in spec: + args.append("-DCMAKE_CXX_COMPILER={0}".format(spec["hip"].hipcc)) args.append("-DROCM_PREFIX=%s" % spec["hsa-rocr-dev"].prefix) if "+pic" in spec: @@ -156,44 +203,31 @@ def cache_test_sources(self): install test subdirectory for use during `spack test 
run`.""" self.cache_extra_test_sources([join_path("examples", "apps")]) - def run_cxx_example_test(self): - """Run stand alone test: cxx_example""" + def test_cxx_example(self): + """build and run cxx-example""" - test_dir = self.test_suite.current_test_cache_dir.examples.apps exe = "cxx-example" - source_file = "cxx-example.cpp" - - if not os.path.isfile(join_path(test_dir, source_file)): - tty.warn("Skipping caliper test:" "{0} does not exist".format(source_file)) - return - - if os.path.exists(self.prefix.lib): - lib_dir = self.prefix.lib - else: - lib_dir = self.prefix.lib64 - - options = [ - "-L{0}".format(lib_dir), - "-I{0}".format(self.prefix.include), - "{0}".format(join_path(test_dir, source_file)), - "-o", - exe, - "-std=c++11", - "-lcaliper", - "-lstdc++", - ] - - if not self.run_test( - exe=os.environ["CXX"], - options=options, - purpose="test: compile {0} example".format(exe), - work_dir=test_dir, - ): - tty.warn("Skipping caliper test: failed to compile example") - return - - if not self.run_test(exe, purpose="test: run {0} example".format(exe), work_dir=test_dir): - tty.warn("Skipping caliper test: failed to run example") - - def test(self): - self.run_cxx_example_test() + source_file = "{0}.cpp".format(exe) + + source_path = find_required_file( + self.test_suite.current_test_cache_dir, source_file, expected=1, recursive=True + ) + + lib_dir = self.prefix.lib if os.path.exists(self.prefix.lib) else self.prefix.lib64 + + cxx = which(os.environ["CXX"]) + test_dir = os.path.dirname(source_path) + with working_dir(test_dir): + cxx( + "-L{0}".format(lib_dir), + "-I{0}".format(self.prefix.include), + source_path, + "-o", + exe, + "-std=c++11", + "-lcaliper", + "-lstdc++", + ) + + cxx_example = which(exe) + cxx_example() diff --git a/scripts/spack/packages/m-aneos/package.py b/scripts/spack/packages/m-aneos/package.py index 3d03dc05c..4bfac8bad 100644 --- a/scripts/spack/packages/m-aneos/package.py +++ b/scripts/spack/packages/m-aneos/package.py @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from spack import * +from spack.package import * class MAneos(MakefilePackage): diff --git a/scripts/spack/packages/opensubdiv/package.py b/scripts/spack/packages/opensubdiv/package.py index b8e17a1c2..190f81b6b 100644 --- a/scripts/spack/packages/opensubdiv/package.py +++ b/scripts/spack/packages/opensubdiv/package.py @@ -28,15 +28,15 @@ def url_for_version(self, version): variant('tbb', default=False, description='Builds with Intel TBB support') variant('openmp', default=False, description='Builds with OpenMP support') - variant('doc', default=False, description='Builds documentation. Requires Python 2') + #variant('doc', default=False, description='Builds documentation. 
Requires Python 2') variant('pic', default=True, description='Produce position-independent code (for shared libs)') depends_on('cmake@2.8.6:', type='build') - depends_on('graphviz', type='build', when='+doc') - depends_on('doxygen', type='build', when='+doc') - depends_on('py-docutils', type='build', when='+doc') - depends_on('python@2.6:2', type='build', when='+doc') + # depends_on('graphviz', type='build', when='+doc') + # depends_on('doxygen', type='build', when='+doc') + # depends_on('py-docutils', type='build', when='+doc') + # depends_on('python@2.6:2', type='build', when='+doc') #depends_on('gl') #depends_on('glew@1.9.0:') #depends_on('glfw@3.0.0:') diff --git a/scripts/spack/packages/polytope/package.py b/scripts/spack/packages/polytope/package.py index 5a5993a9c..838af8444 100644 --- a/scripts/spack/packages/polytope/package.py +++ b/scripts/spack/packages/polytope/package.py @@ -20,6 +20,7 @@ class Polytope(CMakePackage): depends_on('python@3: +zlib +shared', type=('build', 'run'), when='+python') depends_on('py-decorator', type=('build', 'run'), when='+python') depends_on('boost', type=('build', 'run')) + patch('polytope_cxx.patch', when='^boost@1.82:') parallel = False # Should be able to remove this at some point diff --git a/scripts/spack/packages/polytope/polytope_cxx.patch b/scripts/spack/packages/polytope/polytope_cxx.patch new file mode 100644 index 000000000..d473d6788 --- /dev/null +++ b/scripts/spack/packages/polytope/polytope_cxx.patch @@ -0,0 +1,11 @@ +--- a/CMakeLists.txt 2024-04-18 13:16:02.394111000 -0700 ++++ b/CMakeLists.txt 2024-04-18 13:15:58.611132000 -0700 +@@ -10,7 +10,7 @@ + project(Polytope) + + # We require at least C++11 +-set(CMAKE_CXX_STANDARD 11) ++set(CMAKE_CXX_STANDARD 14) + set(CMAKE_CXX_STANDARD_REQUIRED ON) + set(CMAKE_CXX_EXTENSIONS OFF) + diff --git a/scripts/spack/packages/py-numpy-stl/package.py b/scripts/spack/packages/py-numpy-stl/package.py deleted file mode 100644 index 13593cbbf..000000000 --- a/scripts/spack/packages/py-numpy-stl/package.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other -# Spack Project Developers. See the top-level COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from spack import * - - -class PyNumpyStl(PythonPackage): - """""" - - homepage = "https://pypi.org/project/numpy-stl/" - pypi = "numpy-stl/numpy-stl-3.0.0.tar.gz" - - version('3.0.0', sha256='578b78eacb0529ac9aba2f17dcc363d58c7c3c5708710c18f8c1e9965f2e81ac') - - extends('python@3:', type=['build', 'run']) - depends_on('py-setuptools', type='build') diff --git a/scripts/spack/packages/py-pillow/package.py b/scripts/spack/packages/py-pillow/package.py deleted file mode 100644 index 2bb5c5a4e..000000000 --- a/scripts/spack/packages/py-pillow/package.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other -# Spack Project Developers. See the top-level COPYRIGHT file for details. 
-# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from spack.package import * - - -class PyPillowBase(PythonPackage): - """Base class for Pillow and its fork Pillow-SIMD.""" - - maintainers = ["adamjstewart"] - - provides("pil") - - # These defaults correspond to Pillow defaults - # https://pillow.readthedocs.io/en/stable/installation.html#external-libraries - VARIANTS_IN_SETUP_CFG = [ - "zlib", - "jpeg", - "tiff", - "freetype", - "lcms", - "webp", - "webpmux", - "jpeg2000", - "imagequant", - "xcb" - ] - variant("zlib", default=True, description="Compressed PNG functionality") - variant("jpeg", default=True, description="JPEG functionality") - variant("tiff", default=False, description="Compressed TIFF functionality") - variant("freetype", default=False, description="Type related services") - variant("lcms", default=False, description="Color management") - variant("webp", default=False, description="WebP format") - variant("webpmux", when="+webp", default=False, description="WebP metadata") - variant("jpeg2000", default=False, description="JPEG 2000 functionality") - variant("imagequant", when="@3.3:", default=False, description="Improved color quantization") - variant("xcb", when="@7.1:", default=False, description="X11 screengrab support") - variant("raqm", when="@8.2:", default=False, description="RAQM support") - - # Required dependencies - # https://pillow.readthedocs.io/en/latest/installation.html#notes - depends_on("python@3.7:3.10", when="@9:", type=("build", "run")) - depends_on("python@3.6:3.10", when="@8.3.2:8.4", type=("build", "run")) - depends_on("python@3.6:3.9", when="@8:8.3.1", type=("build", "run")) - depends_on("python@3.5:3.8", when="@7.0:7.2", type=("build", "run")) - depends_on("python@2.7:2.8,3.5:3.8", when="@6.2.1:6.2.2", type=("build", "run")) - depends_on("python@2.7:2.8,3.5:3.7", when="@6.0:6.2.0", type=("build", "run")) - depends_on("python@2.7:2.8,3.4:3.7", when="@5.2:5.4", type=("build", "run")) - depends_on("python@2.7:2.8,3.4:3.6", when="@5.0:5.1", type=("build", "run")) - depends_on("python@2.7:2.8,3.3:3.6", when="@4.0:4", type=("build", "run")) - depends_on("python@2.6:2.8,3.2:3.5", when="@2:3", type=("build", "run")) - depends_on("python@2.4:2.7", when="@:1", type=("build", "run")) - depends_on("py-setuptools", type="build") - - # Optional dependencies - depends_on("zlib", when="+zlib") - depends_on("jpeg", when="+jpeg") - depends_on("libtiff", when="+tiff") - depends_on("freetype", when="+freetype") - depends_on("lcms@2:", when="+lcms") - depends_on("libwebp", when="+webp") - depends_on("libwebp+libwebpmux+libwebpdemux", when="+webpmux") - depends_on("openjpeg", when="+jpeg2000") - depends_on("libimagequant", when="+imagequant") - depends_on("libxcb", when="+xcb") - depends_on("libraqm", when="+raqm") - - # Conflicting options - conflicts("+raqm", when="~freetype") - - def patch(self): - """Patch setup.py to provide library and include directories - for dependencies.""" - - if self.spec.satisfies("@:7.1.0"): - self.VARIANTS_IN_SETUP_CFG.remove("xcb") - - library_dirs = [] - include_dirs = [] - for dep in self.spec.dependencies(deptype="link"): - query = self.spec[dep.name] - library_dirs.extend(query.libs.directories) - include_dirs.extend(query.headers.directories) - - setup = FileFilter("setup.py") - setup.filter("library_dirs = []", "library_dirs = {0}".format(library_dirs), string=True) - setup.filter("include_dirs = []", "include_dirs = {0}".format(include_dirs), string=True) - - def variant_to_cfg(variant): - able = "enable" if "+" + variant in 
self.spec else "disable" - return "{0}_{1}=1\n".format(able, variant) - - with open("setup.cfg", "a") as setup: - setup.write("[build_ext]\n") - for variant in self.VARIANTS_IN_SETUP_CFG: - setup.write(variant_to_cfg(variant)) - - setup.write("rpath={0}\n".format(":".join(self.rpath))) - setup.write("[install]\n") - - def setup_build_environment(self, env): - env.set("MAX_CONCURRENCY", str(make_jobs)) - - -class PyPillow(PyPillowBase): - """Pillow is a fork of the Python Imaging Library (PIL). It adds image - processing capabilities to your Python interpreter. This library supports - many file formats, and provides powerful image processing and graphics - capabilities.""" - - homepage = "https://python-pillow.org/" - pypi = "Pillow/Pillow-7.2.0.tar.gz" - - version("9.2.0", sha256="75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04") - version("9.1.1", sha256="7502539939b53d7565f3d11d87c78e7ec900d3c72945d4ee0e2f250d598309a0") - version("9.1.0", sha256="f401ed2bbb155e1ade150ccc63db1a4f6c1909d3d378f7d1235a44e90d75fb97") - version("9.0.1", sha256="6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa") - version("9.0.0", sha256="ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e") - version("8.4.0", sha256="b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed") - version("8.0.0", sha256="59304c67d12394815331eda95ec892bf54ad95e0aa7bc1ccd8e0a4a5a25d4bf3") - version("7.2.0", sha256="97f9e7953a77d5a70f49b9a48da7776dc51e9b738151b22dacf101641594a626") - version("7.0.0", sha256="4d9ed9a64095e031435af120d3c910148067087541131e82b3e8db302f4c8946") - version("6.2.2", sha256="db9ff0c251ed066d367f53b64827cc9e18ccea001b986d08c265e53625dab950") - version("6.2.1", sha256="bf4e972a88f8841d8fdc6db1a75e0f8d763e66e3754b03006cbc3854d89f1cb1") - version("6.2.0", sha256="4548236844327a718ce3bb182ab32a16fa2050c61e334e959f554cac052fb0df") - version("6.0.0", sha256="809c0a2ce9032cbcd7b5313f71af4bdc5c8c771cb86eb7559afd954cab82ebb5") - version("5.4.1", sha256="5233664eadfa342c639b9b9977190d64ad7aca4edc51a966394d7e08e7f38a9f") - version("5.1.0", sha256="cee9bc75bff455d317b6947081df0824a8f118de2786dc3d74a3503fd631f4ef") - version("3.2.0", sha256="64b0a057210c480aea99406c9391180cd866fc0fd8f0b53367e3af21b195784a") - version("3.0.0", sha256="ad50bef540fe5518a4653c3820452a881b6a042cb0f8bb7657c491c6bd3654bb") - - for ver in [ - "9.2.0", - "9.1.1", - "9.1.0", - "9.0.1", - "9.0.0", - "8.4.0", - "8.0.0", - "7.2.0", - "7.0.0", - "6.2.2", - "6.2.1", - "6.2.0", - "6.0.0", - "5.4.1", - "5.1.0", - "3.2.0", - "3.0.0", - ]: - provides("pil@" + ver, when="@" + ver) diff --git a/scripts/spack/packages/py-pipreqs/package.py b/scripts/spack/packages/py-pipreqs/package.py deleted file mode 100644 index 8a7fe5eeb..000000000 --- a/scripts/spack/packages/py-pipreqs/package.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other -# Spack Project Developers. See the top-level COPYRIGHT file for details. 
-# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from spack import * - - -class PyPipreqs(PythonPackage): - """""" - - homepage = "https://pypi.org/project/pipreqs/" - pypi = "pipreqs/pipreqs-0.4.10.tar.gz" - - #maintainers = ['mdavis36','jmikeowen'] - - version('0.4.11', sha256='c793b4e147ac437871b3a962c5ce467e129c859ece5ba79aca83c20f4d9c3aef') - - extends('python@3:', type=['build', 'run']) - depends_on("py-setuptools", type="build") diff --git a/scripts/spack/packages/qhull/package.py b/scripts/spack/packages/qhull/package.py index b735baa6d..be42294e0 100644 --- a/scripts/spack/packages/qhull/package.py +++ b/scripts/spack/packages/qhull/package.py @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from spack import * +from spack.package import * class Qhull(CMakePackage): diff --git a/scripts/spack/packages/spheral/package.py b/scripts/spack/packages/spheral/package.py index 7f8fd6650..6a4ddc0bc 100644 --- a/scripts/spack/packages/spheral/package.py +++ b/scripts/spack/packages/spheral/package.py @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from spack import * +from spack.package import * import socket import os @@ -19,7 +19,7 @@ class Spheral(CachedCMakePackage, CudaPackage): # ------------------------------------------------------------------------- # VERSIONS # ------------------------------------------------------------------------- - version('develop', branch='feature/spack', submodules=True) + version('develop', branch='develop', submodules=True) version('1.0', tag='FSISPH-v1.0', submodules=True) # ------------------------------------------------------------------------- @@ -34,13 +34,13 @@ class Spheral(CachedCMakePackage, CudaPackage): # DEPENDS # ------------------------------------------------------------------------- depends_on('mpi', when='+mpi') - depends_on('cmake@3.10.0:', type='build') - - depends_on('zlib@1.2.11 +shared +pic', type='build') + depends_on('cmake@3.18.0:', type='build') depends_on('boost@1.74.0 +system +filesystem -atomic -container -coroutine -chrono -context -date_time -exception -fiber -graph -iostreams -locale -log -math -mpi -program_options -python -random -regex -test -thread -timer -wave +pic', type='build') - depends_on('qhull@2020.1 +pic', type='build') + depends_on('zlib@1.3 +shared +pic', type='build') + + depends_on('qhull@2020.2 +pic', type='build') depends_on('m-aneos@1.0') depends_on('eigen@3.4.0', type='build') depends_on('hdf5@1.8.19 ~mpi +hl', type='build', when='~mpi') @@ -49,8 +49,10 @@ class Spheral(CachedCMakePackage, CudaPackage): depends_on('silo@4.10.2 +hdf5', type='build') # Zlib fix has been merged into conduit, using develop until next release. 
- depends_on('conduit@0.8.2 +shared +mpi +hdf5 -test ~parmetis', type='build', when='+mpi') - depends_on('conduit@0.8.2 +shared ~mpi +hdf5 -test ~parmetis', type='build', when='~mpi') + depends_on('conduit@0.8.2 +shared +mpi +hdf5~hdf5_compat -test ~parmetis', type='build', when='+mpi') + depends_on('conduit@0.8.2 +shared ~mpi +hdf5~hdf5_compat -test ~parmetis', type='build', when='~mpi') + depends_on('conduit@0.8.2 +shared +mpi +hdf5 -test ~parmetis', type='build', when='+mpi^hdf5@1.8.0:1.8') + depends_on('conduit@0.8.2 +shared ~mpi +hdf5 -test ~parmetis', type='build', when='~mpi^hdf5@1.8.0:1.8') depends_on('axom@0.7.0 ~shared +mpi +hdf5 -lua -examples -python -fortran -umpire -raja', type='build', when='+mpi') depends_on('axom@0.7.0 ~shared ~mpi +hdf5 -lua -examples -python -fortran -umpire -raja', type='build', when='~mpi') @@ -58,26 +60,22 @@ class Spheral(CachedCMakePackage, CudaPackage): depends_on('caliper@2.8.0 ~shared ~adiak ~libdw ~papi ~libunwind +pic', type='build') depends_on('opensubdiv@3.4.3', type='build') - depends_on('polytope@0.7 +python', type='build') + depends_on('polytope@0.7.3 +python', type='build') extends('python@3.9.10 +zlib +shared +ssl +tkinter', type='build') depends_on('py-numpy@1.23.4', type='build') depends_on('py-numpy-stl@3.0.0', type='build') - depends_on('py-python-utils@2.4.0', type='build') - depends_on('py-matplotlib@3.3.4 backend=tkagg +fonts', type='build') - depends_on('py-pillow@9.2.0', type='build') - depends_on('py-decorator@5.1.1', type='build') - depends_on('py-h5py@3.7.0', type='build') - depends_on('py-docutils@0.19', type='build') - depends_on('py-cython@0.29.32', type='build') - depends_on('py-scipy@1.8.1', type='build') - depends_on('py-importlib-metadata@4.12.0', type='build') + depends_on('py-pillow@9.5.0', type='build') + depends_on('py-matplotlib@3.7.4 backend=tkagg +fonts', type='build') + depends_on('py-h5py@3.9.0', type='build') + depends_on('py-docutils@0.18.1', type='build') + depends_on('py-scipy@1.12.0', type='build') depends_on('py-ats@exit', type='build') - depends_on('py-mpi4py@3.1.4', type='build', when='+mpi') + depends_on('py-mpi4py@3.1.5', type='build', when='+mpi') - depends_on('py-sphinx@5.3.0', type='build') - depends_on('py-sphinx-rtd-theme@0.5.1', type='build') + depends_on('py-sphinx', type='build') + depends_on('py-sphinx-rtd-theme', type='build') depends_on('netlib-lapack', type='build') @@ -100,13 +98,13 @@ def cache_name(self): hostname = hostname.rstrip('1234567890') envspec = os.environ.get("SPEC") + spec = self.spec if envspec: cache_spec = envspec else: - cache_spec = self.spec.compiler.name + "@" + self.spec.compiler.version - return "{1}-{2}.cmake".format( - hostname, - self._get_sys_type(self.spec), + cache_spec = str(spec.compiler.name) + "@" + str(spec.compiler.version) + return "{0}-{1}.cmake".format( + str(self._get_sys_type(spec)), cache_spec.replace(" ", "_") ) @@ -159,8 +157,6 @@ def initconfig_package_entries(self): entries.append(cmake_cache_path('python_DIR', spec['python'].prefix)) - entries.append(cmake_cache_path('zlib_DIR', spec['zlib'].prefix)) - entries.append(cmake_cache_path('boost_DIR', spec['boost'].prefix)) entries.append(cmake_cache_path('qhull_DIR', spec['qhull'].prefix)) @@ -203,3 +199,19 @@ def cmake_args(self): spec = self.spec return options + + @property + def build_dirname(self): + """Directory name to use when building the package.""" + return "spack-build-%s" % self.pkg.spec.dag_hash(7) + + @property + def build_directory(self): + """Full-path to the directory to use 
when building the package.""" + spec = self.spec + if spec.satisfies("@develop"): + dev_build_dir = "spack-build-" + str(spec.compiler.name) + "-" + str(spec.compiler.version) + return os.path.join(self.pkg.stage.source_path, build_dirname) + else: + return os.path.join(self.pkg.stage.path, self.build_dirname) + diff --git a/scripts/spack/packages/tcl/package.py b/scripts/spack/packages/tcl/package.py deleted file mode 100644 index 3f4b61f87..000000000 --- a/scripts/spack/packages/tcl/package.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other -# Spack Project Developers. See the top-level COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -import os - -from spack.util.environment import is_system_path - - -class Tcl(AutotoolsPackage, SourceforgePackage): - """Tcl (Tool Command Language) is a very powerful but easy to learn dynamic - programming language, suitable for a very wide range of uses, including web and - desktop applications, networking, administration, testing and many more. Open source - and business-friendly, Tcl is a mature yet evolving language that is truly cross - platform, easily deployed and highly extensible.""" - - homepage = "https://www.tcl.tk/" - sourceforge_mirror_path = "tcl/tcl8.6.11-src.tar.gz" - - version('8.6.11', sha256='8c0486668586672c5693d7d95817cb05a18c5ecca2f40e2836b9578064088258') - version('8.6.10', sha256='5196dbf6638e3df8d5c87b5815c8c2b758496eb6f0e41446596c9a4e638d87ed') - version('8.6.8', sha256='c43cb0c1518ce42b00e7c8f6eaddd5195c53a98f94adc717234a65cbcfd3f96a') - version('8.6.6', sha256='a265409781e4b3edcc4ef822533071b34c3dc6790b893963809b9fe221befe07') - version('8.6.5', sha256='ce26d5b9c7504fc25d2f10ef0b82b14cf117315445b5afa9e673ed331830fb53') - version('8.6.4', sha256='9e6ed94c981c1d0c5f5fefb8112d06c6bf4d050a7327e95e71d417c416519c8d') - version('8.6.3', sha256='6ce0778de0d50daaa9c345d7c1fd1288fb658f674028812e7eeee992e3051005') - version('8.5.19', sha256='d3f04456da873d17f02efc30734b0300fb6c3b85028d445fe284b83253a6db18') - - extendable = True - - depends_on('zlib') - - configure_directory = 'unix' - - def install(self, spec, prefix): - with working_dir(self.build_directory): - make('install') - - # https://wiki.tcl-lang.org/page/kitgen - if self.spec.satisfies('@8.6:'): - make('install-headers') - - # Some applications like Expect require private Tcl headers. - make('install-private-headers') - - # Copy source to install tree - # A user-provided install option might re-do this - # https://github.com/spack/spack/pull/4102/files - installed_src = join_path( - self.spec.prefix, 'share', self.name, 'src') - stage_src = os.path.realpath(self.stage.source_path) - install_tree(stage_src, installed_src) - - # Replace stage dir -> installed src dir in tclConfig - filter_file( - stage_src, installed_src, - join_path(self.spec['tcl'].libs.directories[0], - 'tclConfig.sh')) - - # Don't install binaries in src/ tree - with working_dir(join_path(installed_src, self.configure_directory)): - make('clean') - - @run_after('install') - def symlink_tclsh(self): - with working_dir(self.prefix.bin): - symlink('tclsh{0}'.format(self.version.up_to(2)), 'tclsh') - - # ======================================================================== - # Set up environment to make install easy for tcl extensions. 
- # ======================================================================== - - @property - def libs(self): - return find_libraries(['libtcl{0}'.format(self.version.up_to(2))], - root=self.prefix, recursive=True) - - @property - def command(self): - """Returns the tclsh command. - - Returns: - Executable: the tclsh command - """ - # Although we symlink tclshX.Y to tclsh, we also need to support external - # installations that may not have this symlink, or may have multiple versions - # of Tcl installed in the same directory. - return Executable(os.path.realpath(self.prefix.bin.join( - 'tclsh{0}'.format(self.version.up_to(2))))) - - def _find_script_dir(self): - # Put more-specific prefixes first - check_prefixes = [ - join_path(self.prefix, "share", "tcl{0}".format(self.version.up_to(2))), - self.prefix, - ] - for prefix in check_prefixes: - result = find(prefix, "init.tcl") - if result: - return os.path.dirname(sorted(result)[0]) - - def setup_run_environment(self, env): - """Set TCL_LIBRARY to the directory containing init.tcl. - - For further info see: - - * https://wiki.tcl-lang.org/page/TCL_LIBRARY - """ - # When using tkinter from within spack provided python+tkinter, - # python will not be able to find Tcl unless TCL_LIBRARY is set. - env.set('TCL_LIBRARY', self._find_script_dir()) - #sorted(find(self.prefix, 'init.tcl'))[0])) - - def setup_dependent_build_environment(self, env, dependent_spec): - """Set TCL_LIBRARY to the directory containing init.tcl. - Set TCLLIBPATH to include the tcl-shipped directory for - extensions and any other tcl extension it depends on. - - For further info see: - - * https://wiki.tcl-lang.org/page/TCL_LIBRARY - * https://wiki.tcl-lang.org/page/TCLLIBPATH - """ - env.set('TCL_LIBRARY', self._find_script_dir()) - #sorted(find(self.prefix, 'init.tcl'))[0])) - - # If we set TCLLIBPATH, we must also ensure that the corresponding - # tcl is found in the build environment. This to prevent cases - # where a system provided tcl is run against the standard libraries - # of a Spack built tcl. See issue #7128 that relates to python but - # it boils down to the same situation we have here. - if not is_system_path(self.prefix.bin): - env.prepend_path('PATH', self.prefix.bin) - - # WARNING: paths in $TCLLIBPATH must be *space* separated, - # its value is meant to be a Tcl list, *not* an env list - # as explained here: https://wiki.tcl-lang.org/page/TCLLIBPATH: - # "TCLLIBPATH is a Tcl list, not some platform-specific - # colon-separated or semi-colon separated format" - - # WARNING: Tcl and Tcl extensions like Tk install their configuration files - # in subdirectories like `/lib/tcl8.6`. However, Tcl is aware of this, - # and $TCLLIBPATH should only contain `/lib`. $TCLLIBPATH is only needed - # because we install Tcl extensions to different directories than Tcl. See: - # https://core.tcl-lang.org/tk/tktview/447bd3e4abe17452d19a80e6840dcc8a2603fcbc - env.prepend_path( - 'TCLLIBPATH', self.spec['tcl'].libs.directories[0], separator=' ') - - for d in dependent_spec.traverse(deptype=('build', 'run', 'test')): - if d.package.extends(self.spec): - # Tcl libraries may be installed in lib or lib64, see #19546 - for lib in ['lib', 'lib64']: - tcllibpath = join_path(d.prefix, lib) - if os.path.exists(tcllibpath): - env.prepend_path('TCLLIBPATH', tcllibpath, separator=' ') - - def setup_dependent_run_environment(self, env, dependent_spec): - """Set TCLLIBPATH to include the tcl-shipped directory for - extensions and any other tcl extension it depends on. 
- - For further info see: - - * https://wiki.tcl-lang.org/page/TCLLIBPATH - """ - for d in dependent_spec.traverse(deptype=('build', 'run', 'test')): - if d.package.extends(self.spec): - # Tcl libraries may be installed in lib or lib64, see #19546 - for lib in ['lib', 'lib64']: - tcllibpath = join_path(d.prefix, lib) - if os.path.exists(tcllibpath): - env.prepend_path('TCLLIBPATH', tcllibpath, separator=' ') diff --git a/scripts/spack/packages/tk/package.py b/scripts/spack/packages/tk/package.py deleted file mode 100644 index 439363973..000000000 --- a/scripts/spack/packages/tk/package.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other -# Spack Project Developers. See the top-level COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -import os - -from spack.package import * - - -class Tk(AutotoolsPackage, SourceforgePackage): - """Tk is a graphical user interface toolkit that takes developing desktop - applications to a higher level than conventional approaches. Tk is the standard GUI - not only for Tcl, but for many other dynamic languages, and can produce rich, native - applications that run unchanged across Windows, Mac OS X, Linux and more.""" - - homepage = "https://www.tcl.tk" - sourceforge_mirror_path = "tcl/tk8.6.5-src.tar.gz" - - version("8.6.11", sha256="5228a8187a7f70fa0791ef0f975270f068ba9557f57456f51eb02d9d4ea31282") - version("8.6.10", sha256="63df418a859d0a463347f95ded5cd88a3dd3aaa1ceecaeee362194bc30f3e386") - version("8.6.8", sha256="49e7bca08dde95195a27f594f7c850b088be357a7c7096e44e1158c7a5fd7b33") - version("8.6.6", sha256="d62c371a71b4744ed830e3c21d27968c31dba74dd2c45f36b9b071e6d88eb19d") - version("8.6.5", sha256="fbbd93541b4cd467841208643b4014c4543a54c3597586727f0ab128220d7946") - version("8.6.3", sha256="ba15d56ac27d8c0a7b1a983915a47e0f635199b9473cf6e10fbce1fc73fd8333") - version("8.5.19", sha256="407af1de167477d598bd6166d84459a3bdccc2fb349360706154e646a9620ffa") - - variant("xft", default=True, description="Enable X FreeType") - variant("xss", default=True, description="Enable X Screen Saver") - - extends("tcl", type=("build", "link", "run")) - - depends_on("tcl@8.6:", type=("build", "link", "run"), when="@8.6:") - depends_on("libx11") - depends_on("libxft", when="+xft") - depends_on("libxscrnsaver", when="+xss") - - configure_directory = "unix" - - # https://core.tcl-lang.org/tk/tktview/3598664fffffffffffff - # https://core.tcl-lang.org/tk/info/8b679f597b1d17ad - # https://core.tcl-lang.org/tk/info/997b17c343444e48 - patch( - "https://raw.githubusercontent.com/macports/macports-ports/v2.7.0-archive/x11/tk/files/patch-unix-Makefile.in.diff", - sha256="54bba3d2b3550b7e2c636881c1a3acaf6e1eb743f314449a132864ff47fd0010", - level=0, - when="@:8.6.11 platform=darwin", - ) - patch( - "https://raw.githubusercontent.com/macports/macports-ports/v2.7.0-archive/x11/tk/files/patch-dyld_fallback_library_path.diff", - sha256="9ce6512f1928db9987986f4d3540207c39429395d5234bd6489ba9d86a6d9c31", - level=0, - when="platform=darwin", - ) - - def configure_args(self): - spec = self.spec - config_args = [ - "--with-tcl={0}".format(spec["tcl"].libs.directories[0]), - "--x-includes={0}".format(spec["libx11"].headers.directories[0]), - "--x-libraries={0}".format(spec["libx11"].libs.directories[0]), - ] - config_args += self.enable_or_disable("xft") - config_args += self.enable_or_disable("xss") - - return config_args - - def install(self, spec, prefix): - with working_dir(self.build_directory): - 
make("install") - - # Some applications like Expect require private Tk headers. - make("install-private-headers") - - # Copy source to install tree - installed_src = join_path(self.spec.prefix, "share", self.name, "src") - stage_src = os.path.realpath(self.stage.source_path) - install_tree(stage_src, installed_src) - - # Replace stage dir -> installed src dir in tkConfig - filter_file( - stage_src, - installed_src, - join_path(self.spec["tk"].libs.directories[0], "tkConfig.sh"), - ) - - @run_after("install") - def symlink_wish(self): - with working_dir(self.prefix.bin): - symlink("wish{0}".format(self.version.up_to(2)), "wish") - - def test(self): - self.run_test(self.spec["tk"].command.path, ["-h"], purpose="test wish command") - - test_data_dir = self.test_suite.current_test_data_dir - test_file = test_data_dir.join("test.tcl") - self.run_test( - self.spec["tcl"].command.path, test_file, purpose="test that tk can be loaded" - ) - - @property - def command(self): - """Returns the wish command. - Returns: - Executable: the wish command - """ - # Although we symlink wishX.Y to wish, we also need to support external - # installations that may not have this symlink, or may have multiple versions - # of Tk installed in the same directory. - return Executable( - os.path.realpath(self.prefix.bin.join("wish{0}".format(self.version.up_to(2)))) - ) - - @property - def libs(self): - return find_libraries( - ["libtk{0}".format(self.version.up_to(2))], root=self.prefix, recursive=True - ) - - def _find_script_dir(self): - # Put more-specific prefixes first - check_prefixes = [ - join_path(self.prefix, "share", "tk{0}".format(self.version.up_to(2))), - self.prefix, - ] - for prefix in check_prefixes: - result = find(prefix, "tk.tcl") - if result: - return os.path.dirname(sorted(result)[0]) - - def setup_run_environment(self, env): - """Set TK_LIBRARY to the directory containing tk.tcl. - For further info, see: - * https://www.tcl-lang.org/man/tcl/TkCmd/tkvars.htm - """ - # When using tkinter from within spack provided python+tkinter, - # python will not be able to find Tk unless TK_LIBRARY is set. - env.set("TK_LIBRARY", self._find_script_dir()) - - def setup_dependent_build_environment(self, env, dependent_spec): - """Set TK_LIBRARY to the directory containing tk.tcl. - For further info, see: - * https://www.tcl-lang.org/man/tcl/TkCmd/tkvars.htm - """ - env.set("TK_LIBRARY", self._find_script_dir()) diff --git a/scripts/spheral-setup-venv.in b/scripts/spheral-setup-venv.in index 38c86a73b..05363f62f 100644 --- a/scripts/spheral-setup-venv.in +++ b/scripts/spheral-setup-venv.in @@ -6,10 +6,10 @@ echo "Installing runtime python libraries ..." cp -r @VIRTUALENV_PYTHONPATH_COPY@ .venv/ &> /dev/null echo "Setup Spheral libraries ..." -cp Spheral.pth .venv/lib/python@Python3_VERSION_MAJOR@.@Python3_VERSION_MINOR@/site-packages/ -mkdir -p .venv/lib/python@Python3_VERSION_MAJOR@.@Python3_VERSION_MINOR@/site-packages/Spheral -cd @CMAKE_INSTALL_PREFIX@/.venv/lib/python@Python3_VERSION_MAJOR@.@Python3_VERSION_MINOR@/site-packages/Spheral -cp --symbolic-link @CMAKE_INSTALL_PREFIX@/Spheral/* . > /dev/null 2>&1 +cp @SPHERAL_SITE_PACKAGES_PATH@/Spheral.pth .venv/@SPHERAL_SITE_PACKAGES_PATH@/ +mkdir -p .venv/@SPHERAL_SITE_PACKAGES_PATH@/Spheral +cd @CMAKE_INSTALL_PREFIX@/.venv/@SPHERAL_SITE_PACKAGES_PATH@/Spheral +cp --symbolic-link @CMAKE_INSTALL_PREFIX@/@SPHERAL_SITE_PACKAGES_PATH@/Spheral/* . 
> /dev/null 2>&1 cd - > /dev/null # We need to reconfigure ATS to use our virtual env python otherwise ats will not be able to launch properly. @@ -28,6 +28,6 @@ cp --symbolic-link scripts/lcatstest.sh spheral-lcatstest &> /dev/null cd - > /dev/null echo "Byte-compiling packages in install path ..." -@CMAKE_INSTALL_PREFIX@/spheral -m compileall @CMAKE_INSTALL_PREFIX@/.venv/lib/python@Python3_VERSION_MAJOR@.@Python3_VERSION_MINOR@ +@CMAKE_INSTALL_PREFIX@/spheral -m compileall @CMAKE_INSTALL_PREFIX@/.venv/@SPHERAL_SITE_PACKAGES_PATH@ echo "Done." diff --git a/src/CRKSPH/CRKSPHHydros.py b/src/CRKSPH/CRKSPHHydros.py index ad277cc88..b5ddd0569 100644 --- a/src/CRKSPH/CRKSPHHydros.py +++ b/src/CRKSPH/CRKSPHHydros.py @@ -22,7 +22,8 @@ def CRKSPH(dataBase, damageRelieveRubble = False, ASPH = False, etaMinAxis = 0.1, - crktype = "default"): + crktype = "default", + smoothingScaleMethod = None): # We use the provided DataBase to sniff out what sort of NodeLists are being # used, and based on this determine which SPH object to build. @@ -62,10 +63,11 @@ def CRKSPH(dataBase, Q = eval("LimitedMonaghanGingoldViscosity%id(Clinear=%g, Cquadratic=%g)" % (ndim, Cl, Cq)) # Smoothing scale update - if ASPH: - smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) - else: - smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) + if smoothingScaleMethod is None: + if ASPH: + smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) + else: + smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) # Build the constructor arguments kwargs = {"smoothingScaleMethod" : smoothingScaleMethod, @@ -86,13 +88,16 @@ def CRKSPH(dataBase, if nsolid > 0: kwargs.update({"damageRelieveRubble" : damageRelieveRubble}) - if GeometryRegistrar.coords() == CoordinateType.RZ: - kwargs.update({"etaMinAxis" : etaMinAxis}) - # Build the thing. 
result = constructor(**kwargs) result.Q = Q result._smoothingScaleMethod = smoothingScaleMethod + + # If we're using area-weighted RZ, we need to reflect from the axis + if GeometryRegistrar.coords() == CoordinateType.RZ: + result.zaxisBC = AxisBoundaryRZ(etaMinAxis) + result.appendBoundary(result.zaxisBC) + return result #------------------------------------------------------------------------------- diff --git a/src/DEM/CMakeLists.txt b/src/DEM/CMakeLists.txt index 28606c290..acfb721eb 100644 --- a/src/DEM/CMakeLists.txt +++ b/src/DEM/CMakeLists.txt @@ -9,6 +9,7 @@ set(DEM_inst SolidBoundary/RectangularPlaneSolidBoundary SolidBoundary/CylinderSolidBoundary SolidBoundary/SphereSolidBoundary + SolidBoundary/ClippedSphereSolidBoundary ReplacePairFieldList IncrementPairFieldList ReplaceAndIncrementPairFieldList @@ -29,6 +30,7 @@ set(DEM_headers SolidBoundary/RectangularPlaneSolidBoundary.hh SolidBoundary/CylinderSolidBoundary.hh SolidBoundary/SphereSolidBoundary.hh + SolidBoundary/ClippedSphereSolidBoundary.hh ReplacePairFieldList.hh ReplaceAndIncrementPairFieldList.hh IncrementPairFieldList.hh diff --git a/src/DEM/DEM.py b/src/DEM/DEM.py index f24ae5f9b..98ba8ac91 100644 --- a/src/DEM/DEM.py +++ b/src/DEM/DEM.py @@ -18,6 +18,7 @@ def LinearSpringDEM(dataBase, cohesiveTensileStrength = 0.0, shapeFactor = 0.0, stepsPerCollision = 25, + enableFastTimeStepping = True, xmin = (-1e100, -1e100, -1e100), xmax = ( 1e100, 1e100, 1e100)): @@ -32,6 +33,7 @@ def LinearSpringDEM(dataBase, assert dynamicFrictionCoefficient >= 0, "dynamicFrictionCoefficient must be positive" assert rollingFrictionCoefficient >= 0, "rollingFrictionCoefficient must be positive" assert torsionalFrictionCoefficient >= 0, "torsionalFrictionCoefficient must be positive" + assert isinstance(enableFastTimeStepping,bool) #if (stepsPerCollision < 10) print("WARNING: stepsPerCollision is very low, reccomended is 25-50") @@ -60,6 +62,7 @@ def LinearSpringDEM(dataBase, "cohesiveTensileStrength" : cohesiveTensileStrength, "shapeFactor" : shapeFactor, "stepsPerCollision" : stepsPerCollision, + "enableFastTimeStepping" : enableFastTimeStepping, "xmin" : eval("Vector%id(%g, %g, %g)" % xmin), "xmax" : eval("Vector%id(%g, %g, %g)" % xmax)} @@ -83,6 +86,7 @@ def DEM(dataBase, cohesiveTensileStrength=0.0, shapeFactor=0.0, stepsPerCollision = 25, + enableFastTimeStepping = True, xmin = (-1e100, -1e100, -1e100), xmax = ( 1e100, 1e100, 1e100)): return LinearSpringDEM(dataBase, @@ -97,6 +101,7 @@ def DEM(dataBase, cohesiveTensileStrength, shapeFactor, stepsPerCollision, + enableFastTimeStepping, xmin, xmax) diff --git a/src/DEM/DEMBase.cc b/src/DEM/DEMBase.cc index 6057c68f1..03867b6e2 100644 --- a/src/DEM/DEMBase.cc +++ b/src/DEM/DEMBase.cc @@ -18,7 +18,6 @@ #include "DataBase/ReplaceState.hh" #include "DataBase/DataBase.hh" - #include "Field/FieldList.hh" #include "Field/NodeIterators.hh" @@ -47,7 +46,6 @@ #include "omp.h" #endif - #include #include #include @@ -81,6 +79,7 @@ DEMBase(const DataBase& dataBase, const Vector& xmax): Physics(), mDataBase(dataBase), + mNewSolidBoundaryIndex(0), mSolidBoundaries(), mCycle(0), mContactRemovalFrequency((int)stepsPerCollision), @@ -316,9 +315,12 @@ registerState(DataBase& dataBase, auto rollingDisplacementPolicy = make_policy>>(); auto torsionalDisplacementPolicy = make_policy>>(); + // solid boundary conditions w/ properties that need to be integrated auto boundaryPolicy = make_policy>(mSolidBoundaries); - - state.enroll(DEMFieldNames::solidBoundaries,boundaryPolicy); + 
state.enroll(DEMFieldNames::solidBoundaryPolicy,boundaryPolicy); + + for (auto ibc = 0u; ibc < this->numSolidBoundaries(); ++ibc){ + mSolidBoundaries[ibc]->registerState(dataBase,state);} state.enroll(mTimeStepMask); state.enroll(mass); @@ -410,6 +412,21 @@ preStepInitialize(const DataBase& dataBase, TIME_END("DEMpreStepInitialize"); } +//------------------------------------------------------------------------------ +// This method is called once at the beginning of a timestep, after all state registration. +//------------------------------------------------------------------------------ +// template +// void +// DEMBase:: +// finalize(const DataBase& dataBase, +// State& state, +// StateDerivatives& derivatives) { +// TIME_BEGIN("DEMfinalize"); + + +// TIME_END("DEMfinalize"); +// } + //------------------------------------------------------------------------------ // Call before deriv evaluation //------------------------------------------------------------------------------ @@ -421,8 +438,9 @@ initialize(const Scalar time, const DataBase& dataBase, State& state, StateDerivatives& derivs){ +TIME_BEGIN("DEMinitialize"); - +TIME_END("DEMinitialize"); } @@ -576,6 +594,8 @@ void DEMBase:: initializeOverlap(const DataBase& dataBase, const int startingCompositeParticleIndex){ + TIME_BEGIN("DEMinitializeOverlap"); + const auto& connectivityMap = dataBase.connectivityMap(); const auto& pairs = connectivityMap.nodePairList(); const auto numPairs = pairs.size(); @@ -616,6 +636,7 @@ initializeOverlap(const DataBase& dataBase, const int startingComposi mEquilibriumOverlap(storeNodeList,storeNode)[storeContact] = delta0; } } + TIME_END("DEMinitializeOverlap"); } //------------------------------------------------------------------------------ // Redistribution methods -- before we redistribute, we are going to make sure @@ -628,6 +649,9 @@ template void DEMBase:: initializeBeforeRedistribution(){ + + TIME_BEGIN("DEMinitializeBeforeRedistribution"); + this->prepNeighborIndicesForRedistribution(); this->prepPairFieldListForRedistribution(mShearDisplacement); @@ -641,6 +665,9 @@ initializeBeforeRedistribution(){ this->prepPairFieldListForRedistribution(mNewRollingDisplacement); this->prepPairFieldListForRedistribution(mDDtTorsionalDisplacement); this->prepPairFieldListForRedistribution(mNewTorsionalDisplacement); + + TIME_END("DEMinitializeBeforeRedistribution"); + } template @@ -650,7 +677,14 @@ finalizeAfterRedistribution() { } //------------------------------------------------------------------------------ -// redistribution sub function +// redistribution sub function -- we store the pairwise fields in one of the +// nodes. Prior to redistribution, we don't know where the new domain +// boundaries will be so we want to store the pairwise data in both nodes. +// pairs that span a domain boundary already do this so we just need to make +// sure all the internal contacts data is stored in both nodes. We do this +// by looking through the contacts for each node. If the pair node is an +// internal node than it needs to add the current (storage node) as a contact. 
+// The NeighborIndices needs special treatement so it gets its own method //------------------------------------------------------------------------------ template template @@ -752,6 +786,8 @@ void DEMBase:: updateContactMap(const DataBase& dataBase){ + TIME_BEGIN("DEMupdateContactMap"); + const auto& uniqueIndex = dataBase.DEMUniqueIndex(); const auto& radius = dataBase.DEMParticleRadius(); const auto& position = dataBase.DEMPosition(); @@ -846,7 +882,7 @@ updateContactMap(const DataBase& dataBase){ if (disBc.magnitude() < Ri*(1+bufferDistance)){ // create a unique index for the boundary condition - const auto uId_bc = this->getSolidBoundaryUniqueIndex(ibc); + const auto uId_bc = solidBoundaryi->uniqueIndex(); // check to see if it already exists const auto neighborContacts = mNeighborIndices(nodeListi,i); @@ -861,7 +897,7 @@ updateContactMap(const DataBase& dataBase){ // now add our contact #pragma omp critical { - mContactStorageIndices.push_back(ContactIndex(nodeListi, // storage nodelist index + mContactStorageIndices.push_back(ContactIndex(nodeListi, // storage nodelist index i, // storage node index storageContactIndex, // storage contact index ibc)); // bc index @@ -870,6 +906,7 @@ updateContactMap(const DataBase& dataBase){ } // loop nodes } // loop nodelists } // loop solid boundaries + TIME_END("DEMupdateContactMap"); } // method //------------------------------------------------------------------------------ @@ -879,7 +916,7 @@ template void DEMBase:: identifyInactiveContacts(const DataBase& dataBase){ - + TIME_BEGIN("DEMidentifyInactiveContacts"); const auto bufferDistance = dataBase.maxNeighborSearchBuffer(); const auto numNodeLists = dataBase.numNodeLists(); const auto& nodeListPtrs = dataBase.DEMNodeListPtrs(); @@ -934,5 +971,6 @@ identifyInactiveContacts(const DataBase& dataBase){ } // loop particle bc contacts } // omp parallel region +TIME_END("DEMidentifyInactiveContacts"); } // class } // namespace diff --git a/src/DEM/DEMBase.hh b/src/DEM/DEMBase.hh index 5b158f0d3..2053eb918 100644 --- a/src/DEM/DEMBase.hh +++ b/src/DEM/DEMBase.hh @@ -87,6 +87,14 @@ public: const DataBase& dataBase, const State& state, StateDerivatives& derivs) const override; + + // hook after the intergrator step + // virtual + // void finalize(const Scalar time, + // const Scalar dt, + // const DataBase& dataBase, + // const State& state, + // StateDerivatives& derivs) const override; // Apply boundary conditions to the physics specific fields. virtual @@ -192,12 +200,13 @@ public: const Vector vrotj) const; // Solid Bounderies + int newSolidBoundaryIndex() const; void appendSolidBoundary(SolidBoundaryBase& boundary); void clearSolidBoundaries(); + void removeSolidBoundary(const SolidBoundaryBase& boundary); bool haveSolidBoundary(const SolidBoundaryBase& boundary) const; unsigned int numSolidBoundaries() const; const std::vector*>& solidBoundaryConditions() const; - int getSolidBoundaryUniqueIndex(const int x) const; // counts unsigned int numParticleParticleContacts() const; @@ -216,13 +225,14 @@ protected: const DataBase& mDataBase; + int mNewSolidBoundaryIndex; std::vector*> mSolidBoundaries; - int mCycle; - int mContactRemovalFrequency; + int mCycle; // current cycle + int mContactRemovalFrequency; // how often do we clear out old contacts // number of steps per collision time-scale - Scalar mStepsPerCollision; + Scalar mStepsPerCollision; // Optional bounding box for generating the mesh. 
Vector mxmin, mxmax; @@ -250,6 +260,7 @@ protected: FieldList> mDDtTorsionalDisplacement; // derivative to evolve frictional spring displacement FieldList> mNewTorsionalDisplacement; // handles rotation of frictional spring and reset on slip + // map to storage location from connectivityMap to pairwise fieldlists std::vector mContactStorageIndices; // The restart registration. diff --git a/src/DEM/DEMBaseInline.hh b/src/DEM/DEMBaseInline.hh index dffe855e2..f716956dc 100644 --- a/src/DEM/DEMBaseInline.hh +++ b/src/DEM/DEMBaseInline.hh @@ -321,8 +321,6 @@ rollingMoment(const Dim<3>::Vector rhatij, return DEMDimension>::cross((vroti + vrotj),rhatij).unitVector(); } - - //------------------------------------------------------------------------------ // Add a Boundary condition to the end of the current boundary list. //------------------------------------------------------------------------------ @@ -331,7 +329,9 @@ inline void DEMBase:: appendSolidBoundary(SolidBoundaryBase& boundary) { - mSolidBoundaries.push_back(&boundary); + mNewSolidBoundaryIndex -= 1; + boundary.uniqueIndex(mNewSolidBoundaryIndex); + mSolidBoundaries.push_back(&boundary); } //------------------------------------------------------------------------------ @@ -345,6 +345,15 @@ clearSolidBoundaries() { mSolidBoundaries = std::vector*>(); } + +template +inline +int +DEMBase:: +newSolidBoundaryIndex() const { + return mNewSolidBoundaryIndex; +} + //------------------------------------------------------------------------------ // Test if the given Boundary condition is listed in the physics package. //------------------------------------------------------------------------------ @@ -356,6 +365,16 @@ haveSolidBoundary(const SolidBoundaryBase& boundary) const { return std::count(mSolidBoundaries.begin(), mSolidBoundaries.end(), &boundary) > 0; } +template +inline +void +DEMBase:: +removeSolidBoundary(const SolidBoundaryBase& boundary) { + const auto bcPtr = std::find(mSolidBoundaries.begin(),mSolidBoundaries.end(),&boundary); + if (bcPtr != mSolidBoundaries.end()) mSolidBoundaries.erase(bcPtr); +} + + template inline unsigned int @@ -401,12 +420,4 @@ numParticleBoundaryContacts() const { return (mContactStorageIndices.size()-this->numParticleParticleContacts()); } -template -inline -int -DEMBase:: -getSolidBoundaryUniqueIndex(const int x) const { - return -x-1; -} - } diff --git a/src/DEM/DEMFieldNames.cc b/src/DEM/DEMFieldNames.cc index ce4603288..b0420b608 100644 --- a/src/DEM/DEMFieldNames.cc +++ b/src/DEM/DEMFieldNames.cc @@ -18,4 +18,6 @@ const std::string Spheral::DEMFieldNames::shearDisplacement = "shear displacemen const std::string Spheral::DEMFieldNames::rollingDisplacement = "rolling displacement"; const std::string Spheral::DEMFieldNames::torsionalDisplacement = "torsional displacement"; const std::string Spheral::DEMFieldNames::equilibriumOverlap = "equilibrium overlap"; -const std::string Spheral::DEMFieldNames::solidBoundaries = "solid boundaries"; \ No newline at end of file +const std::string Spheral::DEMFieldNames::maximumOverlap = "maximum overlap"; +const std::string Spheral::DEMFieldNames::solidBoundaries = "solid boundaries"; +const std::string Spheral::DEMFieldNames::solidBoundaryPolicy = "solid boundary policy"; \ No newline at end of file diff --git a/src/DEM/DEMFieldNames.hh b/src/DEM/DEMFieldNames.hh index e3109e33c..3358a9925 100644 --- a/src/DEM/DEMFieldNames.hh +++ b/src/DEM/DEMFieldNames.hh @@ -23,7 +23,9 @@ struct DEMFieldNames { static const std::string rollingDisplacement; static const 
std::string torsionalDisplacement; static const std::string equilibriumOverlap; + static const std::string maximumOverlap; static const std::string solidBoundaries; + static const std::string solidBoundaryPolicy; }; } diff --git a/src/DEM/LinearSpringDEM.cc b/src/DEM/LinearSpringDEM.cc index 05cef1c15..56e3e2f92 100644 --- a/src/DEM/LinearSpringDEM.cc +++ b/src/DEM/LinearSpringDEM.cc @@ -18,7 +18,7 @@ #include "DataBase/StateDerivatives.hh" #include "DataBase/DataBase.hh" #include "DataBase/IncrementState.hh" -#include "DataBase/ReplaceState.hh" +#include "DataBase/MaxReplaceState.hh" #include "Field/FieldList.hh" #include "Neighbor/ConnectivityMap.hh" @@ -33,6 +33,8 @@ #include "DEM/ContactStorageLocation.hh" #include "DEM/SolidBoundary/SolidBoundaryBase.hh" +#include "Utilities/Timer.hh" + #ifdef _OPENMP #include "omp.h" #endif @@ -78,9 +80,11 @@ LinearSpringDEM(const DataBase& dataBase, const Scalar cohesiveTensileStrength, const Scalar shapeFactor, const Scalar stepsPerCollision, + const bool enableFastTimeStepping, const Vector& xmin, const Vector& xmax): DEMBase(dataBase,stepsPerCollision,xmin,xmax), + mEnableFastTimeStepping(enableFastTimeStepping), mNormalSpringConstant(normalSpringConstant), mNormalRestitutionCoefficient(normalRestitutionCoefficient), mTangentialSpringConstant(tangentialSpringConstant), @@ -93,8 +97,17 @@ LinearSpringDEM(const DataBase& dataBase, mShapeFactor(shapeFactor), mNormalBeta(M_PI/std::log(std::max(normalRestitutionCoefficient,1.0e-3))), mTangentialBeta(M_PI/std::log(std::max(tangentialRestitutionCoefficient,1.0e-3))), - mMomentOfInertia(FieldStorageType::CopyFields) { + mCollisionDuration(0.0), + mMomentOfInertia(FieldStorageType::CopyFields), + mMaximumOverlap(FieldStorageType::CopyFields), + mNewMaximumOverlap(FieldStorageType::CopyFields) { mMomentOfInertia = dataBase.newDEMFieldList(0.0, DEMFieldNames::momentOfInertia); + mMaximumOverlap = dataBase.newDEMFieldList(0.0, DEMFieldNames::maximumOverlap); + mNewMaximumOverlap = dataBase.newDEMFieldList(0.0,MaxReplaceState::prefix() + DEMFieldNames::maximumOverlap); + + const auto mass = dataBase.DEMMass(); + const auto minMass = mass.min(); + mCollisionDuration = M_PI*std::sqrt(0.5*minMass/mNormalSpringConstant * (1.0 + 1.0/(mNormalBeta*mNormalBeta))); } //------------------------------------------------------------------------------ @@ -112,18 +125,50 @@ typename LinearSpringDEM::TimeStepType LinearSpringDEM:: dt(const DataBase& dataBase, const State& state, - const StateDerivatives& /*derivs*/, - const typename Dimension::Scalar /*currentTime*/) const{ + const StateDerivatives& derivs, + const typename Dimension::Scalar currentTime) const { + + TIME_BEGIN("LinearSpringDEMdt"); + + auto dtMin = std::numeric_limits::max(); + TimeStepType result(dtMin, "DEM error, this message should not get to the end"); + + if (this->enableFastTimeStepping()){ + result = this->variableTimeStep(dataBase, + state, + derivs, + currentTime); + }else{ + result = this->fixedTimeStep(); + } + + TIME_END("LinearSpringDEMdt"); + return result; +} + +//------------------------------------------------------------------------------ +// set our timestep (for now its a constant single value) +//------------------------------------------------------------------------------ +template +typename LinearSpringDEM::TimeStepType +LinearSpringDEM:: +fixedTimeStep() const { + return make_pair(mCollisionDuration/this->stepsPerCollision(),("fixed-dt Linear Spring DEM vote for time step")); +} +template +typename LinearSpringDEM::TimeStepType 
+LinearSpringDEM:: +variableTimeStep(const DataBase& dataBase, + const State& state, + const StateDerivatives& /*derivs*/, + const typename Dimension::Scalar /*currentTime*/) const { + // Get some useful fluid variables from the DataBase. const auto mass = state.fields(HydroFieldNames::mass, 0.0); const auto position = state.fields(HydroFieldNames::position, Vector::zero); const auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); const auto r = state.fields(DEMFieldNames::particleRadius, 0.0); - //const auto& connectivityMap = dataBase.connectivityMap(this->requireGhostConnectivity(), - // this->requireOverlapConnectivity(), - // this->requireIntersectionConnectivity()); - const auto& contacts = this->contactStorageIndices(); const unsigned int numP2PContacts = this->numParticleParticleContacts(); @@ -215,7 +260,7 @@ dt(const DataBase& dataBase, //solid boundary and distance vector to particle i const auto& solidBoundary = solidBoundaries[bci]; const auto rib = solidBoundary->distance(ri); - const auto vb = solidBoundary->velocity(ri); + const auto vb = solidBoundary->localVelocity(ri); // Compare closing speed to separation const auto vib = vi-vb; @@ -251,6 +296,10 @@ dt(const DataBase& dataBase, // Ensure no point moves further than the buffer distance in one timestep + //-------------------------------------------------------------------------------------- + // NOTE: it would be nice if this wasn't based on the absolute velocity for cases + // where we have a blob of dem particles moving at elevated speeds + //-------------------------------------------------------------------------------------- const auto numNodeLists = position.size(); for (auto k = 0u; k < numNodeLists; ++k) { const auto n = position[k]->size(); @@ -271,7 +320,6 @@ dt(const DataBase& dataBase, return result; } - //------------------------------------------------------------------------------ // method that fires once on startup //------------------------------------------------------------------------------ @@ -279,8 +327,10 @@ template void LinearSpringDEM:: initializeProblemStartup(DataBase& dataBase){ + TIME_BEGIN("LinearSpringDEMinitializeProblemStartup"); DEMBase::initializeProblemStartup(dataBase); this->setMomentOfInertia(); + TIME_END("LinearSpringDEMinitializeProblemStartup"); } //------------------------------------------------------------------------------ @@ -291,10 +341,40 @@ void LinearSpringDEM:: registerState(DataBase& dataBase, State& state) { + TIME_BEGIN("LinearSpringDEMregisterState"); + DEMBase::registerState(dataBase,state); + dataBase.resizeDEMFieldList(mMomentOfInertia, 0.0, DEMFieldNames::momentOfInertia, false); + dataBase.resizeDEMFieldList(mMaximumOverlap, 0.0, DEMFieldNames::maximumOverlap, false); + + auto maxOverlapPolicy = make_policy>(); + state.enroll(mMomentOfInertia); + state.enroll(mMaximumOverlap, maxOverlapPolicy); + + TIME_END("LinearSpringDEMregisterState"); +} + +//------------------------------------------------------------------------------ +// Register the state we need/are going to evolve. 
+//------------------------------------------------------------------------------ +template +void +LinearSpringDEM:: +registerDerivatives(DataBase& dataBase, + StateDerivatives& derivs) { + TIME_BEGIN("LinearSpringDEMregisterDerivs"); + + DEMBase::registerDerivatives(dataBase,derivs); + + dataBase.resizeDEMFieldList(mNewMaximumOverlap, 0.0, DEMFieldNames::maximumOverlap, false); + + derivs.enroll(mNewMaximumOverlap); + + TIME_END("LinearSpringDEMregisterDerivs"); } + //------------------------------------------------------------------------------ // evaluate the derivatives //------------------------------------------------------------------------------ @@ -306,7 +386,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const DataBase& dataBase, const State& state, StateDerivatives& derivatives) const{ - + TIME_BEGIN("LinearSpringDEMevaluateDerivatives"); this->resizeDerivativePairFieldLists(derivatives); // A few useful constants we'll use in the following loop. @@ -316,8 +396,8 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const auto muD = this->dynamicFrictionCoefficient(); const auto muS = this->staticFrictionCoefficient(); - const auto muT = this->torsionalFrictionCoefficient(); - const auto muR = this->rollingFrictionCoefficient(); + const auto muT = this->torsionalFrictionCoefficient() * shapeFactor * muS; + const auto muR = this->rollingFrictionCoefficient() * shapeFactor; const auto Cc = this->cohesiveTensileStrength(); @@ -332,7 +412,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const auto invKr = 1.0/max(kr,tiny); const auto normalDampingTerms = 2.0*kn/(1.0+mNormalBeta*mNormalBeta); - const auto tangentialDampingTerms = 4.0/5.0*ks/(1.0+mTangentialBeta*mTangentialBeta); + const auto tangentialDampingTerms = 2.0*ks/(1.0+mTangentialBeta*mTangentialBeta); // The connectivity. 
const auto& nodeLists = dataBase.DEMNodeListPtrs(); @@ -378,10 +458,12 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, auto DxDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::position, Vector::zero); auto DvDt = derivatives.fields(HydroFieldNames::hydroAcceleration, Vector::zero); auto DomegaDt = derivatives.fields(IncrementState::prefix() + DEMFieldNames::angularVelocity, DEMDimension::zero); + auto newMaximumOverlap = derivatives.fields(MaxReplaceState::prefix() + DEMFieldNames::maximumOverlap, 0.0); CHECK(DxDt.size() == numNodeLists); CHECK(DvDt.size() == numNodeLists); CHECK(DomegaDt.size() == numNodeLists); + CHECK(newMaximumOverlap.size() == numNodeLists); // Get the deriv pairFieldLists auto DDtShearDisplacement = derivatives.fields(ReplaceAndIncrementPairFieldList>::incrementPrefix() + DEMFieldNames::shearDisplacement, std::vector()); @@ -412,6 +494,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, typename SpheralThreads::FieldListStack threadStack; auto DvDt_thread = DvDt.threadCopy(threadStack); auto DomegaDt_thread = DomegaDt.threadCopy(threadStack); + auto newMaxOverlap_thread = newMaximumOverlap.threadCopy(threadStack, ThreadReduction::MAX); //------------------------------------ // particle-particle contacts @@ -449,7 +532,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, // if so do the things if (delta0 > 0.0){ - + // get remaining state for node i const auto cIdi = compositeIndex(nodeListi,i); const auto uIdi = uniqueIndices(nodeListi,i); @@ -473,10 +556,12 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, // Get the derivs from node i auto& DvDti = DvDt_thread(nodeListi, i); auto& DomegaDti = DomegaDt_thread(nodeListi, i); + auto& maxOverlapi = newMaxOverlap_thread(nodeListi,i); // Get the derivs from node j auto& DvDtj = DvDt_thread(nodeListj, j); auto& DomegaDtj = DomegaDt_thread(nodeListj, j); + auto& maxOverlapj = newMaxOverlap_thread(nodeListj,j); // storage sign, this makes pairwise values i-j independent const int storageSign = (uIdi <= uIdj ? 1 : -1); @@ -489,6 +574,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, // boolean checks const auto isBondedParticle = (cIdi == cIdj); + const auto allowSliding = (!isBondedParticle); // effective delta const auto delta = delta0 - overlapij; @@ -516,8 +602,8 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const Vector vij = vi-vj + li*vroti - lj*vrotj; - const Scalar vn = vij.dot(rhatij); // normal velocity - const Vector vs = vij - vn*rhatij; // sliding velocity + const Scalar vn = vij.dot(rhatij); // normal velocity + const Vector vs = vij - vn*rhatij; // sliding velocity const Vector vr = -li*vroti - lj*vrotj; // rolling velocity const Scalar vt = -lij*DEMDimension::dot(omegai-omegaj,rhatij); // torsion velocity @@ -529,76 +615,39 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, // sliding //------------------------------------------------------------ - // project onto new tangential plane -- maintain magnitude - Vector newDeltaSlidij = (deltaSlidij - rhatij.dot(deltaSlidij)*rhatij).unitVector()*deltaSlidij.magnitude(); - - // spring dashpot - const Vector ft0spring = - ks*newDeltaSlidij; - const Vector ft0damp = - Cs*vs; - Vector ft = ft0spring + ft0damp; - - // static friction limit - if (!isBondedParticle and (ft.magnitude() > muS*fnMag)){ - - const Scalar ftDynamic = muD*fnMag; - ft = ftDynamic*ft.unitVector(); - - newDeltaSlidij = ( ft0damp.magnitude() > ftDynamic ? 
- Vector::zero : - -(ft-ft0damp)*invKs ); - - } + Vector newDeltaSlidij, fs; + this->slidingSpringDamper(ks,Cs,muS,muD,deltaSlidij,vs,fnMag,invKs,rhatij,allowSliding, + newDeltaSlidij,fs); // outputs // torsion //------------------------------------------------------------ - // since we use a scalar no need to modify here - auto newDeltaTorsij = deltaTorsij; - - // spring dashpot - const Scalar Mt0spring = - kt*newDeltaTorsij; - const Scalar Mt0damp = - Ct*vt; - Scalar MtorsionMag = (Mt0spring + Mt0damp); - const Scalar MtStatic = muT*shapeFactor*muS*fnMag; - - // limit to static - if (!isBondedParticle and (std::abs(MtorsionMag) > MtStatic)){ - MtorsionMag = (MtorsionMag > 0.0 ? 1.0 : -1.0)*MtStatic; - newDeltaTorsij = (std::abs(Mt0damp) > MtStatic ? 0.0 : -(MtorsionMag-Mt0damp)*invKt); - } + Scalar newDeltaTorsij, ft; + this->slidingSpringDamper(kt,Ct,muT,muT,deltaTorsij,vt,fnMag,invKt,allowSliding, + newDeltaTorsij,ft); // output // rolling //------------------------------------------------------------ - // project onto new tangential plane -- maintain magnitude - Vector newDeltaRollij = (deltaRollij - rhatij.dot(deltaRollij)*rhatij).unitVector()*deltaRollij.magnitude(); - - // spring dashpot - const Vector Mr0spring = - kr*newDeltaRollij; - const Vector Mr0damp = - Cr*vr; - Vector effectiveRollingForce = (Mr0spring + Mr0damp); - const Scalar MrStatic = muR*shapeFactor*fnMag; - - // limit to static - if (!isBondedParticle and (effectiveRollingForce.magnitude() > MrStatic)){ - effectiveRollingForce = MrStatic*effectiveRollingForce.unitVector(); - newDeltaRollij = (Mr0damp.magnitude() > MrStatic ? - Vector::zero : - -(effectiveRollingForce-Mr0damp)*invKr); - } - + Vector newDeltaRollij, fr; + this->slidingSpringDamper(kr,Cr,muR,muR,deltaRollij,vr,fnMag,invKr,rhatij,allowSliding, + newDeltaRollij,fr); // outputs // accelerations //------------------------------------------------------------ // Rectilinear Acceleration - const Vector fij = fn - fc + ft; + const Vector fij = fn - fc + fs; DvDti += fij/mi; DvDtj -= fij/mj; // angular acceleration const auto Msliding = -DEMDimension::cross(rhatij,fij); - const auto Mrolling = -DEMDimension::cross(rhatij,effectiveRollingForce); - const auto Mtorsion = MtorsionMag * this->torsionMoment(rhatij,omegai,omegaj); // rename torsionDirection + const auto Mrolling = -DEMDimension::cross(rhatij,fr); + const auto Mtorsion = ft * this->torsionMoment(rhatij,omegai,omegaj); // rename torsionDirection DomegaDti += (Msliding*li - (Mtorsion + Mrolling) * lij)/Ii; DomegaDtj += (Msliding*lj + (Mtorsion + Mrolling) * lij)/Ij; + // update max overlaps + maxOverlapi = max(maxOverlapi,delta); + maxOverlapj = max(maxOverlapj,delta); + // for spring updates newShearDisplacement(nodeListi,i)[contacti] = storageSign*newDeltaSlidij; DDtShearDisplacement(nodeListi,i)[contacti] = storageSign*vs; @@ -663,7 +712,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const auto deltaTorsib = torsionalDisplacement(nodeListi,i)[contacti]; // velocity of boundary @ ri - const auto vb = solidBoundary->velocity(ri); + const auto vb = solidBoundary->localVelocity(ri); // line of action for the contact const auto rhatib = rib.unitVector(); @@ -675,12 +724,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const auto mib = 2*mi; const auto lib = 2*li; - // damping constants -- ct and cr derived quantities ala Zhang 2017 - //----------------------------To-Do----------------------------------- - // oh man okay, solid bc's were overdamping. 
The 0.5 factor got the - // coeff of restitution right (i guess i missed a factor of 2 somewhere) - // need to go back and do the math later. - //-------------------------------------------------------------------- + // damping coefficients const auto Cn = std::sqrt(mib*normalDampingTerms); const auto Cs = std::sqrt(mib*tangentialDampingTerms); const auto Ct = 0.50 * Cs * shapeFactor2; @@ -704,62 +748,21 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, // sliding //------------------------------------------------------------ - // project onto new tangential plane -- maintain magnitude - Vector newDeltaSlidib = (deltaSlidib - rhatib.dot(deltaSlidib)*rhatib).unitVector()*deltaSlidib.magnitude(); - - // spring dashpot - const Vector ft0spring = - ks*newDeltaSlidib; - const Vector ft0damp = - Cs*vs; - Vector ft = ft0spring + ft0damp; - - // static friction limit - if (ft.magnitude() > muS*fnMag){ - - const Scalar ftDynamic = muD*fnMag; - ft = ftDynamic*ft.unitVector(); - - newDeltaSlidib = ( ft0damp.magnitude() > ftDynamic ? - Vector::zero : - -(ft-ft0damp)*invKs ); - - } + Vector newDeltaSlidib, ft; + this->slidingSpringDamper(ks,Cs,muS,muD,deltaSlidib,vs,fnMag,invKs,rhatib,true, + newDeltaSlidib,ft); // outputs // torsion //------------------------------------------------------------ - // since we use a scalar no need to modify here - auto newDeltaTorsib = deltaTorsib; - - // spring dashpot - const Scalar Mt0spring = - kt*newDeltaTorsib; - const Scalar Mt0damp = - Ct*vt; - Scalar MtorsionMag = (Mt0spring + Mt0damp); - const Scalar MtStatic = muT*shapeFactor*muS*fnMag; - - // limit to static - if (std::abs(MtorsionMag) > MtStatic){ - MtorsionMag = (MtorsionMag > 0.0 ? 1.0 : -1.0)*MtStatic; - newDeltaTorsib = (std::abs(Mt0damp) > MtStatic ? 0.0 : -(MtorsionMag-Mt0damp)*invKt); - } + Scalar newDeltaTorsib, MtorsionMag; + this->slidingSpringDamper(kt,Ct,muT,muT,deltaTorsib,vt,fnMag,invKt,true, + newDeltaTorsib, MtorsionMag); // output // rolling //------------------------------------------------------------ - // project onto new tangential plane -- maintain magnitude - Vector newDeltaRollib = (deltaRollib - rhatib.dot(deltaRollib)*rhatib).unitVector()*deltaRollib.magnitude(); - - // spring dashpot - const Vector Mr0spring = - kr*newDeltaRollib; - const Vector Mr0damp = - Cr*vr; - Vector effectiveRollingForce = (Mr0spring + Mr0damp); - const Scalar MrStatic = muR*shapeFactor*fnMag; - - // limit to static - if (effectiveRollingForce.magnitude() > MrStatic){ - effectiveRollingForce = MrStatic*effectiveRollingForce.unitVector(); - newDeltaRollib = (Mr0damp.magnitude() > MrStatic ? 
- Vector::zero : - -(effectiveRollingForce-Mr0damp)*invKr); - } - + Vector newDeltaRollib, fr; + this->slidingSpringDamper(kr,Cr,muR,muR,deltaRollib,vr,fnMag,invKr,rhatib,true, + newDeltaRollib, fr); // outputs // accelerations //------------------------------------------------------------ // Rectilinear Acceleration @@ -768,7 +771,7 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, // angular acceleration const auto Msliding = -DEMDimension::cross(rhatib,fib); - const auto Mrolling = -DEMDimension::cross(rhatib,effectiveRollingForce); + const auto Mrolling = -DEMDimension::cross(rhatib,fr); const auto Mtorsion = MtorsionMag * this->torsionMoment(rhatib,omegai,0*omegai); // rename torsionDirection DomegaDti += (Msliding*li - (Mtorsion + Mrolling) * lib)/Ii; @@ -796,6 +799,8 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, DxDt(nodeListi,i) = veli; } // loop nodes } // loop nodelists + + TIME_END("LinearSpringDEMevaluateDerivatives"); } // method @@ -875,6 +880,8 @@ LinearSpringDEM:: dumpState(FileIO& file, const string& pathName) const { DEMBase::dumpState(file,pathName); file.write(mMomentOfInertia, pathName + "/momentOfInertia"); + file.write(mMaximumOverlap, pathName + "/maximumOverlap"); + file.write(mNewMaximumOverlap, pathName + "/newMaximumOverlap"); } //------------------------------------------------------------------------------ @@ -886,6 +893,8 @@ LinearSpringDEM:: restoreState(const FileIO& file, const string& pathName) { DEMBase::restoreState(file,pathName); file.read(mMomentOfInertia, pathName + "/momentOfInertia"); + file.read(mMaximumOverlap, pathName + "/maximumOverlap"); + file.read(mNewMaximumOverlap, pathName + "/newMaximumOverlap"); } } // namespace diff --git a/src/DEM/LinearSpringDEM.hh b/src/DEM/LinearSpringDEM.hh index b932dc7d5..6ac98bbcd 100644 --- a/src/DEM/LinearSpringDEM.hh +++ b/src/DEM/LinearSpringDEM.hh @@ -49,6 +49,7 @@ public: const Scalar cohesiveTensileStrength, const Scalar shapeFactor, const Scalar stepsPerCollision, + const bool enableFastTimeStepping, const Vector& xmin, const Vector& xmax); @@ -65,19 +66,59 @@ public: virtual void registerState(DataBase& dataBase, State& state) override; + virtual void registerDerivatives(DataBase& dataBase, + StateDerivatives& derivs) override; + virtual void evaluateDerivatives(const Scalar time, const Scalar dt, const DataBase& dataBase, const State& state, StateDerivatives& derivs) const override; - virtual - void applyGhostBoundaries(State& state, - StateDerivatives& derivs) override; - virtual - void enforceBoundaries(State& state, - StateDerivatives& derivs) override; + + virtual void applyGhostBoundaries(State& state, + StateDerivatives& derivs) override; + + virtual void enforceBoundaries(State& state, + StateDerivatives& derivs) override; + + // sub-methods for dt + TimeStepType variableTimeStep(const DataBase& dataBase, + const State& state, + const StateDerivatives& derivs, + const Scalar time) const; + + TimeStepType fixedTimeStep() const; + + // generalized spring damper functions (inlined) + void slidingSpringDamper(const Scalar k, + const Scalar C, + const Scalar mus, + const Scalar mud, + const Vector& x, + const Vector& DxDt, + const Scalar fnMag, + const Scalar invK, + const Vector& rhatij, + const bool allowSiding, + Vector& xNew, + Vector& force) const; + + void slidingSpringDamper(const Scalar k, + const Scalar C, + const Scalar mus, + const Scalar mud, + const Scalar x, + const Scalar DxDt, + const Scalar fnMag, + const Scalar invK, + const bool allowSiding, + 
Scalar& xNew, + Scalar& force) const; // set/gets + bool enableFastTimeStepping() const; + void enableFastTimeStepping(bool x); + Scalar normalSpringConstant() const; void normalSpringConstant(Scalar x); @@ -114,6 +155,9 @@ public: Scalar tangentialBeta() const; void tangentialBeta(Scalar x); + Scalar collisionDuration() const; + void collisionDuration(Scalar x); + // set moment of inertia on start up void setMomentOfInertia(); @@ -123,6 +167,8 @@ public: // get methods for class FieldLists const FieldList& momentOfInertia() const; + const FieldList& maximumOverlap() const; + const FieldList& newMaximumOverlap() const; //**************************************************************************** @@ -133,6 +179,7 @@ public: //**************************************************************************** private: //--------------------------- Private Interface ---------------------------// + Scalar mEnableFastTimeStepping; Scalar mNormalSpringConstant; Scalar mNormalRestitutionCoefficient; Scalar mTangentialSpringConstant; @@ -146,9 +193,12 @@ private: Scalar mNormalBeta; Scalar mTangentialBeta; + Scalar mCollisionDuration; // field Lists FieldList mMomentOfInertia; + FieldList mMaximumOverlap; + FieldList mNewMaximumOverlap; // FieldList mOptimalSpringConstant; // No default constructor, copying, or assignment. diff --git a/src/DEM/LinearSpringDEMInline.hh b/src/DEM/LinearSpringDEMInline.hh index f87e570b5..90bb53884 100644 --- a/src/DEM/LinearSpringDEMInline.hh +++ b/src/DEM/LinearSpringDEMInline.hh @@ -1,5 +1,25 @@ namespace Spheral { + +//------------------------------------------------------------------------------ +// set/get to activate/deactivate fast timestepping +//------------------------------------------------------------------------------ +template +inline +bool +LinearSpringDEM:: +enableFastTimeStepping() const { + return mEnableFastTimeStepping; +} +template +inline +void +LinearSpringDEM:: +enableFastTimeStepping(bool x) { + mEnableFastTimeStepping = x; +} + + //------------------------------------------------------------------------------ // set/get our spring constant //------------------------------------------------------------------------------ @@ -217,6 +237,20 @@ tangentialBeta(typename Dimension::Scalar x) { mTangentialBeta = x; } +template +inline +typename Dimension::Scalar +LinearSpringDEM:: +collisionDuration() const { + return mCollisionDuration; +} +template +inline +void +LinearSpringDEM:: +collisionDuration(typename Dimension::Scalar x) { + mCollisionDuration = x; +} //------------------------------------------------------------------------------ // moment of interia specializations @@ -246,6 +280,69 @@ momentOfInertia(const Dim<3>::Scalar m, const Dim<3>::Scalar R) const { } +//------------------------------------------------------------------------------ +// friction functions +//------------------------------------------------------------------------------ +template +inline +void +LinearSpringDEM:: +slidingSpringDamper(const typename Dimension::Scalar k, + const typename Dimension::Scalar C, + const typename Dimension::Scalar mus, + const typename Dimension::Scalar mud, + const typename Dimension::Vector& x, + const typename Dimension::Vector& DxDt, + const typename Dimension::Scalar fnMag, + const typename Dimension::Scalar invK, + const typename Dimension::Vector& rhatij, + const bool allowSliding, + typename Dimension::Vector& xNew, + typename Dimension::Vector& forceTotal) const{ + + xNew = (x - rhatij.dot(x)*rhatij).unitVector()*x.magnitude(); + + 
const Vector forceSpring = - k * xNew; + const Vector forceDamper = - C * DxDt; + + forceTotal = forceSpring + forceDamper; + + if (allowSliding and (forceTotal.magnitude() > mus * fnMag)){ + forceTotal = mud*fnMag*forceTotal.unitVector(); + xNew = (forceDamper.magnitude() > mud*fnMag ? + Vector::zero : + -(forceTotal-forceDamper)*invK ); + } +} + +template +inline +void +LinearSpringDEM:: +slidingSpringDamper(const typename Dimension::Scalar k, + const typename Dimension::Scalar C, + const typename Dimension::Scalar mus, + const typename Dimension::Scalar mud, + const typename Dimension::Scalar x, + const typename Dimension::Scalar DxDt, + const typename Dimension::Scalar fnMag, + const typename Dimension::Scalar invK, + const bool allowSliding, + typename Dimension::Scalar& xNew, + typename Dimension::Scalar& forceTotal) const{ + xNew = x; + + const Scalar forceSpring = - k * xNew; + const Scalar forceDamper = - C * DxDt; + + forceTotal = forceSpring + forceDamper; + + if (allowSliding and (std::abs(forceTotal) > mus * fnMag)){ + forceTotal = (forceTotal > 0.0 ? 1.0 : -1.0) * mud * fnMag; + xNew = (std::abs(forceDamper) > mud * fnMag ? 0.0 : -(forceTotal-forceDamper)*invK); + } +} + //------------------------------------------------------------------------------ // FieldList //------------------------------------------------------------------------------ @@ -257,4 +354,20 @@ momentOfInertia() const { return mMomentOfInertia; } +template +inline +const FieldList& +LinearSpringDEM:: +maximumOverlap() const { + return mMaximumOverlap; +} + +template +inline +const FieldList& +LinearSpringDEM:: +newMaximumOverlap() const { + return mNewMaximumOverlap; +} + } diff --git a/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.cc b/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.cc index ec2d559dc..efda0cf5e 100644 --- a/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.cc +++ b/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.cc @@ -5,12 +5,17 @@ // J.M. 
Pearl 2023 //----------------------------------------------------------------------------// +#include "FileIO/FileIO.hh" + #include "DataBase/DataBase.hh" #include "DataBase/State.hh" #include "DataBase/StateDerivatives.hh" #include "DEM/SolidBoundary/CircularPlaneSolidBoundary.hh" +#include +using std::string; + namespace Spheral { template @@ -45,10 +50,24 @@ distance(const Vector& position) const { template typename Dimension::Vector CircularPlaneSolidBoundary:: -velocity(const Vector& position) const { +localVelocity(const Vector& position) const { return mVelocity; } +template +void +CircularPlaneSolidBoundary:: +registerState(DataBase& dataBase, + State& state) { + const auto boundaryKey = "CircularPlaneSolidBoundary_" + std::to_string(std::abs(this->uniqueIndex())); + const auto pointKey = boundaryKey +"_point"; + const auto velocityKey = boundaryKey +"_velocity"; + const auto normalKey = boundaryKey +"_normal"; + state.enrollAny(pointKey,mPoint); + state.enrollAny(velocityKey,mVelocity); + state.enrollAny(normalKey,mNormal); +} + template void CircularPlaneSolidBoundary:: @@ -57,4 +76,28 @@ update(const double multiplier, const double t, const double dt) { } +//------------------------------------------------------------------------------ +// Restart +//------------------------------------------------------------------------------ +template +void +CircularPlaneSolidBoundary:: +dumpState(FileIO& file, const string& pathName) const { + file.write(mPoint, pathName + "/point"); + file.write(mNormal, pathName + "/normal"); + file.write(mExtent, pathName + "/extent"); + file.write(mVelocity, pathName + "/velocity"); +} + + +template +void +CircularPlaneSolidBoundary:: +restoreState(const FileIO& file, const string& pathName) { + file.read(mPoint, pathName + "/point"); + file.read(mNormal, pathName + "/normal"); + file.read(mExtent, pathName + "/extent"); + file.read(mVelocity, pathName + "/velocity"); +} + } \ No newline at end of file diff --git a/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.hh b/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.hh index f1e2c0937..173321759 100644 --- a/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.hh +++ b/src/DEM/SolidBoundary/CircularPlaneSolidBoundary.hh @@ -32,7 +32,10 @@ public: ~CircularPlaneSolidBoundary(); virtual Vector distance(const Vector& position) const override; - virtual Vector velocity(const Vector& position) const override; + virtual Vector localVelocity(const Vector& position) const override; + + virtual void registerState(DataBase& dataBase, + State& state) override; virtual void update(const double multiplier, const double time, @@ -50,6 +53,10 @@ public: const Vector& velocity() const; void velocity(const Vector& value); + virtual std::string label() const { return "CircularPlaneSolidBoundary" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; + protected: //-------------------------- Protected Interface --------------------------// Vector mPoint; diff --git a/src/DEM/SolidBoundary/ClippedSphereSolidBoundary.cc b/src/DEM/SolidBoundary/ClippedSphereSolidBoundary.cc new file mode 100644 index 000000000..0be718247 --- /dev/null +++ b/src/DEM/SolidBoundary/ClippedSphereSolidBoundary.cc @@ -0,0 +1,142 @@ +//---------------------------------Spheral++----------------------------------// +// ClippedSphereSolidBoundary -- N-dimensional spherical solid boundary for DEM. +// +// J.M.
Pearl 2023 +//----------------------------------------------------------------------------// + +#include "FileIO/FileIO.hh" + +#include "DataBase/DataBase.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" + +#include "DEM/SolidBoundary/ClippedSphereSolidBoundary.hh" + +#include + +#include +using std::string; + +namespace Spheral { + +template +ClippedSphereSolidBoundary:: +ClippedSphereSolidBoundary(const Vector& center, + const Scalar radius, + const Vector& clipPoint, + const Vector& clipAxis): + SolidBoundaryBase(), + mCenter(center), + mRadius(radius), + mClipPoint(clipPoint), + mClipAxis(clipAxis), + mClipIntersectionRadius(0.0), + mVelocity(Vector::zero){ + this->setClipIntersectionRadius(); + mClipAxis = mClipAxis.unitVector(); +} + +template +ClippedSphereSolidBoundary:: +~ClippedSphereSolidBoundary(){ +} + + +template +typename Dimension::Vector +ClippedSphereSolidBoundary:: +distance(const Vector& position) const { + + // contacting sphere + const auto contactPoint = (position - mCenter).unitVector()*mRadius + mCenter; + Vector dist = position - contactPoint; + + const auto planeSignedDistance = (contactPoint - mClipPoint).dot(mClipAxis); + + // if contant pt above clip plane check for edge contact + if (planeSignedDistance > 0.0){ + + // break into perp and in-plane components + const auto q = (position-mClipPoint); + const auto qnMag = q.dot(mClipAxis); + const auto qn = qnMag * mClipAxis; + const auto qr = q - qn; + + // if outside circle enforce planar solid bc + dist = min(qr.magnitude() - mClipIntersectionRadius,0.0)*qr.unitVector() + qn; + + } + return dist; +} + +template +typename Dimension::Vector +ClippedSphereSolidBoundary:: +localVelocity(const Vector& position) const { + return mVelocity; +} + +template +void +ClippedSphereSolidBoundary:: +registerState(DataBase& dataBase, + State& state) { + + const auto boundaryKey = "ClippedSphereSolidBoundary_" + std::to_string(std::abs(this->uniqueIndex())); + const auto pointKey = boundaryKey +"_point"; + const auto clipPointKey = boundaryKey +"_clipPoint"; + const auto velocityKey = boundaryKey +"_velocity"; + + state.enrollAny(pointKey,mCenter); + state.enrollAny(clipPointKey,mClipPoint); + state.enrollAny(pointKey,mVelocity); + +} + +template +void +ClippedSphereSolidBoundary:: +update(const double multiplier, const double t, const double dt) { + mCenter += multiplier*mVelocity; + mClipPoint += multiplier*mVelocity; +} + + +template +void +ClippedSphereSolidBoundary:: +setClipIntersectionRadius() { + const auto rcMag = (mClipPoint - mCenter).dot(mClipAxis); + mClipIntersectionRadius = (rcMag < mRadius ? 
std::sqrt(mRadius*mRadius-rcMag*rcMag) : 0.0); + mClipPoint = rcMag * mClipAxis + mCenter; +} + +//------------------------------------------------------------------------------ +// Restart +//------------------------------------------------------------------------------ +template +void +ClippedSphereSolidBoundary:: +dumpState(FileIO& file, const string& pathName) const { + file.write(mCenter, pathName + "/center"); + file.write(mRadius, pathName + "/radius"); + file.write(mClipPoint, pathName + "/clipPoint"); + file.write(mClipAxis, pathName + "/clipAxis"); + file.write(mClipIntersectionRadius, pathName + "/clipIntersectionRadius"); + file.write(mVelocity, pathName + "/velocity"); +} + + +template +void +ClippedSphereSolidBoundary:: +restoreState(const FileIO& file, const string& pathName) { + file.read(mCenter, pathName + "/center"); + file.read(mRadius, pathName + "/radius"); + file.read(mClipPoint, pathName + "/clipPoint"); + file.read(mClipAxis, pathName + "/clipAxis"); + file.read(mClipIntersectionRadius, pathName + "/clipIntersectionRadius"); + file.read(mVelocity, pathName + "/velocity"); +} + +} \ No newline at end of file diff --git a/src/DEM/SolidBoundary/ClippedSphereSolidBoundary.hh b/src/DEM/SolidBoundary/ClippedSphereSolidBoundary.hh new file mode 100644 index 000000000..2e479685e --- /dev/null +++ b/src/DEM/SolidBoundary/ClippedSphereSolidBoundary.hh @@ -0,0 +1,88 @@ +//---------------------------------Spheral++----------------------------------// +// ClippedSphereSolidBoundary -- sphere clipped by a plane solid boundary for DEM +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// + +#ifndef __Spheral_ClippedSphereSolidBoundary_hh__ +#define __Spheral_ClippedSphereSolidBoundary_hh__ + +#include "DEM/SolidBoundary/SolidBoundaryBase.hh" + +namespace Spheral { + +template class State; +template class StateDerivatives; +template class DataBase; + +template +class ClippedSphereSolidBoundary : public SolidBoundaryBase { + + typedef typename Dimension::Scalar Scalar; + typedef typename Dimension::Vector Vector; + typedef typename Dimension::Tensor Tensor; + +public: + //--------------------------- Public Interface ---------------------------// + + ClippedSphereSolidBoundary(const Vector& center, + const Scalar radius, + const Vector& clipPoint, + const Vector& clipAxis); + + ~ClippedSphereSolidBoundary(); + + virtual Vector distance(const Vector& position) const override; + virtual Vector localVelocity(const Vector& position) const override; + + virtual void registerState(DataBase& dataBase, + State& state) override; + + + virtual void update(const double multiplier, + const double time, + const double dt) override; + + const Vector& center() const; + void center(const Vector& value); + + Scalar radius() const; + void radius(Scalar value); + + const Vector& clipPoint() const; + void clipPoint(const Vector& value); + + const Vector& clipAxis() const; + void clipAxis(const Vector& value); + + const Vector& velocity() const; + void velocity(const Vector& value); + + void setClipIntersectionRadius(); + + virtual std::string label() const { return "ClippedSphereSolidBoundary" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; +protected: + //-------------------------- Protected Interface --------------------------// + Vector mCenter; + Scalar mRadius; + Vector mClipPoint; + Vector mClipAxis; + Scalar
mClipIntersectionRadius; + + Vector mVelocity; + +private: + //--------------------------- Private Interface ---------------------------// + // No default constructor, copying, or assignment. + ClippedSphereSolidBoundary(); + ClippedSphereSolidBoundary(const ClippedSphereSolidBoundary&); + ClippedSphereSolidBoundary& operator=(const ClippedSphereSolidBoundary&); +}; + +} + +#include "ClippedSphereSolidBoundaryInline.hh" + +#endif diff --git a/src/DEM/SolidBoundary/ClippedSphereSolidBoundaryInline.hh b/src/DEM/SolidBoundary/ClippedSphereSolidBoundaryInline.hh new file mode 100644 index 000000000..220b6bc8b --- /dev/null +++ b/src/DEM/SolidBoundary/ClippedSphereSolidBoundaryInline.hh @@ -0,0 +1,87 @@ +namespace Spheral { + +template +inline +const typename Dimension::Vector& +ClippedSphereSolidBoundary:: +center() const { + return mCenter; +} + +template +inline +void +ClippedSphereSolidBoundary:: +center(const typename Dimension::Vector& value) { + mCenter=value; +} + +template +inline +typename Dimension::Scalar +ClippedSphereSolidBoundary:: +radius() const { + return mRadius; +} + +template +inline +void +ClippedSphereSolidBoundary:: +radius(typename Dimension::Scalar value) { + mRadius=value; +} + + +template +inline +const typename Dimension::Vector& +ClippedSphereSolidBoundary:: +clipPoint() const { + return mClipPoint; +} + +template +inline +void +ClippedSphereSolidBoundary:: +clipPoint(const typename Dimension::Vector& value) { + mClipPoint=value; + this->setClipIntersectionRadius(); +} + + +template +inline +const typename Dimension::Vector& +ClippedSphereSolidBoundary:: +clipAxis() const { + return mClipAxis; +} + +template +inline +void +ClippedSphereSolidBoundary:: +clipAxis(const typename Dimension::Vector& value) { + mClipAxis=value.unitVector(); + this->setClipIntersectionRadius(); +} + +template +inline +const typename Dimension::Vector& +ClippedSphereSolidBoundary:: +velocity() const { + return mVelocity; +} + +template +inline +void +ClippedSphereSolidBoundary:: +velocity(const typename Dimension::Vector& value) { + mVelocity=value; +} + +} \ No newline at end of file diff --git a/src/DEM/SolidBoundary/ClippedSphereSolidBoundaryInst.cc.py b/src/DEM/SolidBoundary/ClippedSphereSolidBoundaryInst.cc.py new file mode 100644 index 000000000..c76e1cedc --- /dev/null +++ b/src/DEM/SolidBoundary/ClippedSphereSolidBoundaryInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "DEM/SolidBoundary/ClippedSphereSolidBoundary.cc" +#include "Geometry/Dimension.hh" + +namespace Spheral { + template class ClippedSphereSolidBoundary< Dim< %(ndim)s > >; +} +""" diff --git a/src/DEM/SolidBoundary/CylinderSolidBoundary.cc b/src/DEM/SolidBoundary/CylinderSolidBoundary.cc index 082258876..8280ef1c4 100644 --- a/src/DEM/SolidBoundary/CylinderSolidBoundary.cc +++ b/src/DEM/SolidBoundary/CylinderSolidBoundary.cc @@ -4,12 +4,17 @@ // J.M.
Pearl 2023 //----------------------------------------------------------------------------// +#include "FileIO/FileIO.hh" + #include "DataBase/DataBase.hh" #include "DataBase/State.hh" #include "DataBase/StateDerivatives.hh" #include "DEM/SolidBoundary/CylinderSolidBoundary.hh" +#include +using std::string; + namespace Spheral { template @@ -31,7 +36,6 @@ CylinderSolidBoundary:: ~CylinderSolidBoundary(){ } - template typename Dimension::Vector CylinderSolidBoundary:: @@ -47,10 +51,24 @@ distance(const Vector& position) const { template typename Dimension::Vector CylinderSolidBoundary:: -velocity(const Vector& position) const { +localVelocity(const Vector& position) const { return mVelocity; } +template +void +CylinderSolidBoundary:: +registerState(DataBase& dataBase, + State& state) { + const auto boundaryKey = "CylinderSolidBoundary_" + std::to_string(std::abs(this->uniqueIndex())); + const auto pointKey = boundaryKey +"_point"; + const auto velocityKey = boundaryKey +"_velocity"; + //const auto normalKey = boundaryKey +"_normal"; + state.enrollAny(pointKey,mPoint); + state.enrollAny(velocityKey,mVelocity); + //state.enrollAny(pointKey,mNormal); +} + template void CylinderSolidBoundary:: @@ -58,5 +76,31 @@ update(const double multiplier, const double t, const double dt) { mPoint += multiplier*mVelocity; } +//------------------------------------------------------------------------------ +// Restart +//------------------------------------------------------------------------------ +template +void +CylinderSolidBoundary:: +dumpState(FileIO& file, const string& pathName) const { + file.write(mPoint, pathName + "/point"); + file.write(mAxis, pathName + "/axis"); + file.write(mRadius, pathName + "/radius"); + file.write(mLength, pathName + "/length"); + file.write(mVelocity, pathName + "/velocity"); +} + + +template +void +CylinderSolidBoundary:: +restoreState(const FileIO& file, const string& pathName) { + file.read(mPoint, pathName + "/point"); + file.read(mAxis, pathName + "/axis"); + file.read(mRadius, pathName + "/radius"); + file.read(mLength, pathName + "/length"); + file.read(mVelocity, pathName + "/velocity"); +} + } \ No newline at end of file diff --git a/src/DEM/SolidBoundary/CylinderSolidBoundary.hh b/src/DEM/SolidBoundary/CylinderSolidBoundary.hh index da95e2cfc..7da17e8d8 100644 --- a/src/DEM/SolidBoundary/CylinderSolidBoundary.hh +++ b/src/DEM/SolidBoundary/CylinderSolidBoundary.hh @@ -33,7 +33,10 @@ public: ~CylinderSolidBoundary(); virtual Vector distance(const Vector& position) const override; - virtual Vector velocity(const Vector& position) const override; + virtual Vector localVelocity(const Vector& position) const override; + + virtual void registerState(DataBase& dataBase, + State& state) override; virtual void update(const double multiplier, const double time, @@ -54,6 +57,10 @@ public: const Vector& velocity() const; void velocity(const Vector& value); + virtual std::string label() const { return "CylinderSolidBoundary" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; + protected: //-------------------------- Protected Interface --------------------------// Vector mPoint; diff --git a/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.cc b/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.cc index a15cb7a44..eb29ba5c5 100644 --- a/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.cc +++ b/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.cc @@ -4,12 +4,17
@@ // J.M. Pearl 2023 //----------------------------------------------------------------------------// +#include "FileIO/FileIO.hh" + #include "DataBase/DataBase.hh" #include "DataBase/State.hh" #include "DataBase/StateDerivatives.hh" #include "DEM/SolidBoundary/InfinitePlaneSolidBoundary.hh" +#include +using std::string; + namespace Spheral { template @@ -36,10 +41,24 @@ distance(const Vector& position) const { template typename Dimension::Vector InfinitePlaneSolidBoundary:: -velocity(const Vector& position) const { +localVelocity(const Vector& position) const { return mVelocity; } +template +void +InfinitePlaneSolidBoundary:: +registerState(DataBase& dataBase, + State& state) { + const auto boundaryKey = "InfinitePlaneSolidBoundary_" + std::to_string(std::abs(this->uniqueIndex())); + const auto pointKey = boundaryKey +"_point"; + const auto velocityKey = boundaryKey +"_velocity"; + const auto normalKey = boundaryKey +"_normal"; + state.enrollAny(pointKey,mPoint); + state.enrollAny(velocityKey,mVelocity); + state.enrollAny(normalKey,mNormal); +} + template void InfinitePlaneSolidBoundary:: @@ -47,5 +66,26 @@ update(const double multiplier, const double t, const double dt) { mPoint += multiplier*mVelocity; } +//------------------------------------------------------------------------------ +// Restart +//------------------------------------------------------------------------------ +template +void +InfinitePlaneSolidBoundary:: +dumpState(FileIO& file, const string& pathName) const { + file.write(mPoint, pathName + "/point"); + file.write(mNormal, pathName + "/normal"); + file.write(mVelocity, pathName + "/velocity"); +} + + +template +void +InfinitePlaneSolidBoundary:: +restoreState(const FileIO& file, const string& pathName) { + file.read(mPoint, pathName + "/point"); + file.read(mNormal, pathName + "/normal"); + file.read(mVelocity, pathName + "/velocity"); +} } \ No newline at end of file diff --git a/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.hh b/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.hh index b80539818..2c80a5be6 100644 --- a/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.hh +++ b/src/DEM/SolidBoundary/InfinitePlaneSolidBoundary.hh @@ -30,8 +30,11 @@ public: ~InfinitePlaneSolidBoundary(); virtual Vector distance(const Vector& position) const override; - virtual Vector velocity(const Vector& position) const override; - + virtual Vector localVelocity(const Vector& position) const override; + + virtual void registerState(DataBase& dataBase, + State& state) override; + virtual void update(const double multiplier, const double time, const double dt) override; @@ -45,6 +48,10 @@ public: const Vector& velocity() const; void velocity(const Vector& value); + virtual std::string label() const { return "InfinitePlaneSolidBoundary" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; + protected: //-------------------------- Protected Interface --------------------------// Vector mPoint; diff --git a/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.cc b/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.cc index 5290b957e..9e38fc6e2 100644 --- a/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.cc +++ b/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.cc @@ -5,11 +5,16 @@ // J.M. 
Pearl 2023 //----------------------------------------------------------------------------// +#include "FileIO/FileIO.hh" + #include "DataBase/DataBase.hh" #include "DataBase/State.hh" #include "DataBase/StateDerivatives.hh" #include "DEM/SolidBoundary/RectangularPlaneSolidBoundary.hh" +#include +using std::string; + namespace Spheral { template @@ -40,10 +45,21 @@ distance(const Vector& position) const { template typename Dimension::Vector RectangularPlaneSolidBoundary:: -velocity(const Vector& position) const { +localVelocity(const Vector& position) const { return mVelocity; } +template +void +RectangularPlaneSolidBoundary:: +registerState(DataBase& dataBase, + State& state) { + const auto boundaryKey = "RectangularPlaneSolidBoundary_" + std::to_string(std::abs(this->uniqueIndex())); + const auto pointKey = boundaryKey +"_point"; + const auto velocityKey = boundaryKey +"_velocity"; + state.enrollAny(pointKey,mPoint); + state.enrollAny(velocityKey,mVelocity); +} template void RectangularPlaneSolidBoundary:: @@ -51,5 +67,29 @@ update(const double multiplier, const double t, const double dt) { mPoint += multiplier*mVelocity; } +//------------------------------------------------------------------------------ +// Restart +//------------------------------------------------------------------------------ +template +void +RectangularPlaneSolidBoundary:: +dumpState(FileIO& file, const string& pathName) const { + file.write(mPoint, pathName + "/point"); + file.write(mBasis, pathName + "/basis"); + file.write(mExtent, pathName + "/extent"); + file.write(mVelocity, pathName + "/velocity"); +} + + +template +void +RectangularPlaneSolidBoundary:: +restoreState(const FileIO& file, const string& pathName) { + file.read(mPoint, pathName + "/point"); + file.read(mBasis, pathName + "/basis"); + file.read(mExtent, pathName + "/extent"); + file.read(mVelocity, pathName + "/velocity"); +} + } \ No newline at end of file diff --git a/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.hh b/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.hh index 3442a6791..62c75e917 100644 --- a/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.hh +++ b/src/DEM/SolidBoundary/RectangularPlaneSolidBoundary.hh @@ -33,7 +33,10 @@ public: ~RectangularPlaneSolidBoundary(); virtual Vector distance(const Vector& position) const override; - virtual Vector velocity(const Vector& position) const override; + virtual Vector localVelocity(const Vector& position) const override; + + virtual void registerState(DataBase& dataBase, + State& state) override; virtual void update(const double multiplier, const double time, @@ -51,6 +54,10 @@ public: const Vector& velocity() const; void velocity(const Vector& value); + virtual std::string label() const { return "RectangularPlaneSolidBoundary" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; + protected: //-------------------------- Protected Interface --------------------------// Vector mPoint; diff --git a/src/DEM/SolidBoundary/SolidBoundaryBase.cc b/src/DEM/SolidBoundary/SolidBoundaryBase.cc index 5bc9f98eb..5e919452a 100644 --- a/src/DEM/SolidBoundary/SolidBoundaryBase.cc +++ b/src/DEM/SolidBoundary/SolidBoundaryBase.cc @@ -20,7 +20,10 @@ namespace Spheral { template SolidBoundaryBase:: -SolidBoundaryBase(){} +SolidBoundaryBase(): + mUniqueIndex(-1), + mRestart(registerWithRestart(*this)){ +} template SolidBoundaryBase:: ~SolidBoundaryBase(){} diff --git 
a/src/DEM/SolidBoundary/SolidBoundaryBase.hh b/src/DEM/SolidBoundary/SolidBoundaryBase.hh index 2bd843c4f..ef8b89f5c 100644 --- a/src/DEM/SolidBoundary/SolidBoundaryBase.hh +++ b/src/DEM/SolidBoundary/SolidBoundaryBase.hh @@ -10,18 +10,30 @@ // is implemented in the DEMBase class. The boundaries // themselves simply define the surface and how it evolves // in time. -// +//----------------------------------------------------------------------------// +// ToDo +// -- add complete registration methods +// -- set the unique index of the bc so no std::string input is required for registerState +// -- make restartable +// -- stateBase add scalar and tensor (talk to Mike about different pattern) +// -- tests +//----------------------------------------------------------------------------- // J.M. Pearl 2023 //----------------------------------------------------------------------------// #ifndef __Spheral_SolidBoundaryBase_hh__ #define __Spheral_SolidBoundaryBase_hh__ +#include "DataOutput/registerWithRestart.hh" + +#include + namespace Spheral { template class State; template class StateDerivatives; template class DataBase; +class FileIO; template class SolidBoundaryBase { @@ -37,13 +49,32 @@ public: virtual ~SolidBoundaryBase(); virtual Vector distance(const Vector& position) const = 0; - virtual Vector velocity(const Vector& position) const = 0; + virtual Vector localVelocity(const Vector& position) const = 0; + + virtual void registerState(DataBase& dataBase, + State& state) = 0; virtual void update(const double multiplier, const double t, const double dt) = 0; -}; + + void uniqueIndex(int uId); + int uniqueIndex() const; + + // restartability will default to no-op + virtual std::string label() const { return "SolidBoundaryBase" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const {}; + virtual void restoreState(const FileIO& file, const std::string& pathName) {}; +private: +//--------------------------- Private Interface --------------------------// +int mUniqueIndex; + +RestartRegistrationType mRestart; + +}; } +#include "SolidBoundaryBaseInline.hh" + #endif diff --git a/src/DEM/SolidBoundary/SolidBoundaryBaseInline.hh b/src/DEM/SolidBoundary/SolidBoundaryBaseInline.hh new file mode 100644 index 000000000..8a4e4a12c --- /dev/null +++ b/src/DEM/SolidBoundary/SolidBoundaryBaseInline.hh @@ -0,0 +1,20 @@ +namespace Spheral { +//------------------------------------------------------------------------------ +// set/get unique index for the solid bc +//------------------------------------------------------------------------------ +template +inline +void +SolidBoundaryBase:: +uniqueIndex(int uId){ + mUniqueIndex = uId; +} +template +inline +int +SolidBoundaryBase:: +uniqueIndex() const { + return mUniqueIndex; +} +} + diff --git a/src/DEM/SolidBoundary/SphereSolidBoundary.cc b/src/DEM/SolidBoundary/SphereSolidBoundary.cc index b0781cff4..3a600a1a1 100644 --- a/src/DEM/SolidBoundary/SphereSolidBoundary.cc +++ b/src/DEM/SolidBoundary/SphereSolidBoundary.cc @@ -4,6 +4,8 @@ // J.M.
Pearl 2023 //----------------------------------------------------------------------------// +#include "FileIO/FileIO.hh" + #include "DataBase/DataBase.hh" #include "DataBase/State.hh" #include "DataBase/StateDerivatives.hh" @@ -11,6 +13,8 @@ #include "DEM/SolidBoundary/SphereSolidBoundary.hh" #include +#include +using std::string; namespace Spheral { @@ -18,17 +22,12 @@ template SphereSolidBoundary:: SphereSolidBoundary(const Vector& center, const Scalar radius, - const Vector& clipPoint, - const Vector& clipAxis): + const RotationType& angularVelocity): SolidBoundaryBase(), mCenter(center), mRadius(radius), - mClipPoint(clipPoint), - mClipAxis(clipAxis), - mClipIntersectionRadius(0.0), - mVelocity(Vector::zero){ - this->setClipIntersectionRadius(); - mClipAxis = mClipAxis.unitVector(); + mVelocity(Vector::zero), + mAngularVelocity(angularVelocity){ } template @@ -41,51 +40,62 @@ template typename Dimension::Vector SphereSolidBoundary:: distance(const Vector& position) const { + const auto p = position - mCenter; + return p - p.unitVector()*mRadius; +} - // contacting sphere - const auto contactPoint = (position - mCenter).unitVector()*mRadius + mCenter; - Vector dist = position - contactPoint; - - const auto planeSignedDistance = (contactPoint - mClipPoint).dot(mClipAxis); +template +typename Dimension::Vector +SphereSolidBoundary:: +localVelocity(const Vector& position) const { + const auto rVector = (position - mCenter).unitVector()*mRadius; + return mVelocity + DEMDimension::cross(mAngularVelocity,rVector); +} - // if contant pt above clip plane check for edge contact - if (planeSignedDistance > 0.0){ +template +void +SphereSolidBoundary:: +registerState(DataBase& dataBase, + State& state) { - // break into perp and in-plane components - const auto q = (position-mClipPoint); - const auto qnMag = q.dot(mClipAxis); - const auto qn = qnMag * mClipAxis; - const auto qr = q - qn; + const auto boundaryKey = "SphereSolidBoundary_" + std::to_string(std::abs(this->uniqueIndex())); + const auto pointKey = boundaryKey +"_point"; + const auto velocityKey = boundaryKey +"_velocity"; - // if outside circle enforce planar solid bc - dist = min(qr.magnitude() - mClipIntersectionRadius,0.0)*qr.unitVector() + qn; + state.enrollAny(pointKey,mCenter); + state.enrollAny(velocityKey,mVelocity); - } - return dist; } template -typename Dimension::Vector +void SphereSolidBoundary:: -velocity(const Vector& position) const { - return mVelocity; +update(const double multiplier, const double t, const double dt) { + mCenter += multiplier*mVelocity; } +//------------------------------------------------------------------------------ +// Restart +//------------------------------------------------------------------------------ template void SphereSolidBoundary:: -update(const double multiplier, const double t, const double dt) { - mCenter += multiplier*mVelocity; +dumpState(FileIO& file, const string& pathName) const { + file.write(mAngularVelocity, pathName + "/omega"); + file.write(mCenter, pathName + "/center"); + file.write(mRadius, pathName + "/radius"); + file.write(mVelocity, pathName + "/velocity"); } template void SphereSolidBoundary:: -setClipIntersectionRadius() { - const auto rcMag = (mClipPoint - mCenter).dot(mClipAxis); - mClipIntersectionRadius = (rcMag < mRadius ?
std::sqrt(mRadius*mRadius-rcMag*rcMag) : 0.0); - mClipPoint = rcMag * mClipAxis + mCenter; +restoreState(const FileIO& file, const string& pathName) { + file.read(mAngularVelocity, pathName + "/omega"); + file.read(mCenter, pathName + "/center"); + file.read(mRadius, pathName + "/radius"); + file.read(mVelocity, pathName + "/velocity"); } } \ No newline at end of file diff --git a/src/DEM/SolidBoundary/SphereSolidBoundary.hh b/src/DEM/SolidBoundary/SphereSolidBoundary.hh index 0aa78f63b..c6a932e48 100644 --- a/src/DEM/SolidBoundary/SphereSolidBoundary.hh +++ b/src/DEM/SolidBoundary/SphereSolidBoundary.hh @@ -7,6 +7,7 @@ #ifndef __Spheral_SphereSolidBoundary_hh__ #define __Spheral_SphereSolidBoundary_hh__ +#include "DEM/DEMDimension.hh" #include "DEM/SolidBoundary/SolidBoundaryBase.hh" namespace Spheral { @@ -21,19 +22,21 @@ class SphereSolidBoundary : public SolidBoundaryBase { typedef typename Dimension::Scalar Scalar; typedef typename Dimension::Vector Vector; typedef typename Dimension::Tensor Tensor; - + typedef typename DEMDimension::AngularVector RotationType; public: //--------------------------- Public Interface ---------------------------// - SphereSolidBoundary(const Vector& center, const Scalar radius, - const Vector& clipPoint, - const Vector& clipAxis); + const RotationType& angularVelocity); ~SphereSolidBoundary(); virtual Vector distance(const Vector& position) const override; - virtual Vector velocity(const Vector& position) const override; + virtual Vector localVelocity(const Vector& position) const override; + + virtual void registerState(DataBase& dataBase, + State& state) override; + virtual void update(const double multiplier, const double time, @@ -45,26 +48,25 @@ public: Scalar radius() const; void radius(Scalar value); - const Vector& clipPoint() const; - void clipPoint(const Vector& value); - - const Vector& clipAxis() const; - void clipAxis(const Vector& value); - const Vector& velocity() const; void velocity(const Vector& value); - void setClipIntersectionRadius(); + const RotationType& angularVelocity() const; + void angularVelocity(const RotationType& value); + + virtual std::string label() const { return "SphereSolidBoundary" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; + protected: //-------------------------- Protected Interface --------------------------// Vector mCenter; Scalar mRadius; - Vector mClipPoint; - Vector mClipAxis; - Scalar mClipIntersectionRadius; Vector mVelocity; + RotationType mAngularVelocity; + private: //--------------------------- Private Interface ---------------------------// // No default constructor, copying, or assignment. 
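Note on the revised SphereSolidBoundary API: the plane-clipping members move to the new ClippedSphereSolidBoundary above, and the sphere boundary instead carries an angular velocity so that localVelocity can report the surface speed at the contact point (translation plus the spin contribution). The following is a minimal, hypothetical usage sketch, not part of this patch; it assumes the 3D DEM AngularVector is an ordinary Dim<3>::Vector and uses made-up center, radius, and spin values.

    // Illustrative only: a unit sphere spinning about the z axis.
    using Dim3 = Spheral::Dim<3>;
    const Dim3::Vector center(0.0, 0.0, 0.0);
    const double radius = 1.0;
    const Dim3::Vector omega(0.0, 0.0, 2.0);   // angular velocity (assumed Vector form in 3D)
    Spheral::SphereSolidBoundary<Dim3> sphereBC(center, radius, omega);
    // Surface velocity at a contact point = translational velocity + omega x lever arm.
    const auto vSurface = sphereBC.localVelocity(Dim3::Vector(1.0, 0.0, 0.0));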
diff --git a/src/DEM/SolidBoundary/SphereSolidBoundaryInline.hh b/src/DEM/SolidBoundary/SphereSolidBoundaryInline.hh index 58b99a7ef..f8d4b6014 100644 --- a/src/DEM/SolidBoundary/SphereSolidBoundaryInline.hh +++ b/src/DEM/SolidBoundary/SphereSolidBoundaryInline.hh @@ -32,56 +32,36 @@ radius(typename Dimension::Scalar value) { mRadius=value; } - template inline const typename Dimension::Vector& SphereSolidBoundary:: -clipPoint() const { - return mClipPoint; -} - -template -inline -void -SphereSolidBoundary:: -clipPoint(const typename Dimension::Vector& value) { - mClipPoint=value; - this->setClipIntersectionRadius(); -} - - -template -inline -const typename Dimension::Vector& -SphereSolidBoundary:: -clipAxis() const { - return mClipAxis; +velocity() const { + return mVelocity; } template inline void SphereSolidBoundary:: -clipAxis(const typename Dimension::Vector& value) { - mClipAxis=value.unitVector(); - this->setClipIntersectionRadius(); +velocity(const typename Dimension::Vector& value) { + mVelocity=value; } template inline -const typename Dimension::Vector& +const typename DEMDimension::AngularVector& SphereSolidBoundary:: -velocity() const { - return mVelocity; +angularVelocity() const { + return mAngularVelocity; } template inline void SphereSolidBoundary:: -velocity(const typename Dimension::Vector& value) { - mVelocity=value; +angularVelocity(const typename DEMDimension::AngularVector& value) { + mAngularVelocity=value; } } \ No newline at end of file diff --git a/src/DataBase/CMakeLists.txt b/src/DataBase/CMakeLists.txt index f7cf8171d..e6dc6cee5 100644 --- a/src/DataBase/CMakeLists.txt +++ b/src/DataBase/CMakeLists.txt @@ -25,6 +25,7 @@ set(DataBase_headers IncrementStateInline.hh IncrementBoundedState.hh IncrementBoundedStateInline.hh + MaxReplaceState.hh PureReplaceState.hh PureReplaceStateInline.hh ReplaceState.hh diff --git a/src/DataBase/MaxReplaceState.hh b/src/DataBase/MaxReplaceState.hh new file mode 100644 index 000000000..099a946a9 --- /dev/null +++ b/src/DataBase/MaxReplaceState.hh @@ -0,0 +1,51 @@ +//---------------------------------Spheral++----------------------------------// +// MaxReplaceState -- Replaces the state with the max of the state/deriv value +// +// J.M. Pearl 2024 +//----------------------------------------------------------------------------// +#ifndef __Spheral_MaxReplaceState_hh__ +#define __Spheral_MaxReplaceState_hh__ + +#include "FieldUpdatePolicy.hh" +#include "Utilities/DBC.hh" + +namespace Spheral { + +// Forward declarations. +template class StateDerivatives; + +template +class MaxReplaceState: public FieldUpdatePolicy { +public: + //--------------------------- Public Interface ---------------------------// + // Useful typedefs + using KeyType = typename FieldUpdatePolicy::KeyType; + + // Constructors, destructor. + MaxReplaceState(std::initializer_list depends = {}); + virtual ~MaxReplaceState() {} + + // Overload the methods describing how to update Fields. + virtual void update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // Equivalence. 
+ virtual bool operator==(const UpdatePolicyBase& rhs) const override; + + static const std::string prefix() { return "new "; } + +private: + //--------------------------- Private Interface ---------------------------// + MaxReplaceState(const MaxReplaceState& rhs); + MaxReplaceState& operator=(const MaxReplaceState& rhs); +}; + +} + +#include "MaxReplaceStateInline.hh" + +#endif diff --git a/src/DataBase/MaxReplaceStateInline.hh b/src/DataBase/MaxReplaceStateInline.hh new file mode 100644 index 000000000..a69e0c813 --- /dev/null +++ b/src/DataBase/MaxReplaceStateInline.hh @@ -0,0 +1,62 @@ +//---------------------------------Spheral++----------------------------------// +// MaxReplaceState -- Replaces the state with the max of the state/deriv value +// +// J.M. Pearl 2024 +//----------------------------------------------------------------------------// +#include "State.hh" +#include "StateDerivatives.hh" +#include "Field/Field.hh" +#include "Utilities/DBC.hh" + +namespace Spheral { + +//------------------------------------------------------------------------------ +// Constructors. +//------------------------------------------------------------------------------ +template +inline +MaxReplaceState:: +MaxReplaceState(std::initializer_list depends): + FieldUpdatePolicy(depends) { +} + +//------------------------------------------------------------------------------ +// Update the field. +//------------------------------------------------------------------------------ +template +inline +void +MaxReplaceState:: +update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double /*multiplier*/, + const double /*t*/, + const double /*dt*/) { + + // Find the matching replacement field from the StateDerivatives. + const auto replaceKey = prefix() + key; + auto& f = state.field(key, ValueType()); + const auto& df = derivs.field(replaceKey, ValueType()); + + // Loop over the internal values of the field. + const auto n = f.nodeList().numInternalNodes(); +#pragma omp parallel for + for (auto i = 0u; i < n; ++i) { + f(i) = std::max(df(i),f(i)); + } +} + +//------------------------------------------------------------------------------ +// Equivalence operator. +//------------------------------------------------------------------------------ +template +inline +bool +MaxReplaceState:: +operator==(const UpdatePolicyBase& rhs) const { + return dynamic_cast*>(&rhs) != nullptr; +} + +} + diff --git a/src/DataBase/StateBase.cc b/src/DataBase/StateBase.cc index c11aa57dd..4ae5e7c99 100644 --- a/src/DataBase/StateBase.cc +++ b/src/DataBase/StateBase.cc @@ -140,7 +140,25 @@ operator==(const StateBase& rhs) const { result = false; } } catch (const boost::bad_any_cast&) { - std::cerr << "StateBase::operator== WARNING: unable to compare values for " << lhsItr->first << "\n"; + try { + auto lhsPtr = boost::any_cast(lhsItr->second); + auto rhsPtr = boost::any_cast(rhsItr->second); + if (*lhsPtr != *rhsPtr) { + cerr << "Vector for " << lhsItr->first << " don't match." << endl; + result = false; + } + } catch (const boost::bad_any_cast&) { + try { + auto lhsPtr = boost::any_cast(lhsItr->second); + auto rhsPtr = boost::any_cast(rhsItr->second); + if (*lhsPtr != *rhsPtr) { + cerr << "Scalar for " << lhsItr->first << " don't match." 
<< endl; + result = false; + } + } catch (const boost::bad_any_cast&) { + std::cerr << "StateBase::operator== WARNING: unable to compare values for " << lhsItr->first << "\n"; + } + } } } } @@ -391,12 +409,23 @@ assign(const StateBase& rhs) { const auto rhsptr = boost::any_cast*>(anyrhs); *lhsptr = *rhsptr; } catch(const boost::bad_any_cast&) { - // We'll assume other things don't need to be assigned... - // VERIFY2(false, "StateBase::assign ERROR: unknown type for key " << itr->first << "\n"); + try { + auto lhsptr = boost::any_cast(anylhs); + const auto rhsptr = boost::any_cast(anyrhs); + *lhsptr = *rhsptr; + } catch(const boost::bad_any_cast&) { + try { + auto lhsptr = boost::any_cast(anylhs); + const auto rhsptr = boost::any_cast(anyrhs); + *lhsptr = *rhsptr; + } catch(const boost::bad_any_cast&) { + // We'll assume other things don't need to be assigned... + // VERIFY2(false, "StateBase::assign ERROR: unknown type for key " << itr->first << "\n"); + } + } } } } - // Copy the connectivity (by reference). This thing is too // big to carry around separate copies! if (rhs.mConnectivityMapPtr != NULL) { @@ -446,8 +475,16 @@ copyState() { itr->second = clone.get(); } catch (const boost::bad_any_cast&) { + try { + auto ptr = boost::any_cast(anythingPtr); + auto clone = std::shared_ptr(new Vector(*ptr)); + mCache.push_back(clone); + itr->second = clone.get(); + + } catch (const boost::bad_any_cast&) { // We'll assume other things don't need to be copied... // VERIFY2(false, "StateBase::copyState ERROR: unrecognized type for " << itr->first << "\n"); + } } } } diff --git a/src/Distributed/CMakeLists.txt b/src/Distributed/CMakeLists.txt index 60f123fb5..b7016cdcc 100644 --- a/src/Distributed/CMakeLists.txt +++ b/src/Distributed/CMakeLists.txt @@ -65,10 +65,12 @@ if (ENABLE_MPI) waitAllWithDeadlockDetection.hh ) - install(FILES mpi_mpi4py.py - DESTINATION Spheral - RENAME mpi.py - ) + if (NOT ENABLE_CXXONLY) + install(FILES mpi_mpi4py.py + DESTINATION ${SPHERAL_SITE_PACKAGES_PATH}/Spheral + RENAME mpi.py + ) + endif() spheral_add_obj_library(Distributed SPHERAL_OBJ_LIBS) @@ -76,10 +78,12 @@ else() #---------------------------------------------------------------------------- # MPI parallel off #---------------------------------------------------------------------------- - install(FILES fakempi.py - DESTINATION Spheral - RENAME mpi.py - ) + if (NOT ENABLE_CXXONLY) + install(FILES fakempi.py + DESTINATION ${SPHERAL_SITE_PACKAGES_PATH}/Spheral + RENAME mpi.py + ) + endif() endif() diff --git a/src/FSISPH/FSISPHHydros.py b/src/FSISPH/FSISPHHydros.py index 4a6ca1db5..044dacef6 100644 --- a/src/FSISPH/FSISPHHydros.py +++ b/src/FSISPH/FSISPHHydros.py @@ -4,32 +4,34 @@ dims = spheralDimensions() def FSISPH(dataBase, - W, - Q = None, - slides=None, - cfl = 0.35, - surfaceForceCoefficient=0.0, - densityStabilizationCoefficient=0.1, - specificThermalEnergyDiffusionCoefficient=0.1, - xsphCoefficient=0.0, - interfaceMethod=HLLCInterface, - kernelAveragingMethod = NeverAverageKernels, - sumDensityNodeLists=[], - useVelocityMagnitudeForDt = False, - compatibleEnergyEvolution = True, - evolveTotalEnergy = False, - linearCorrectGradients = True, - planeStrain = False, - interfacePmin = 0.0, - interfaceNeighborAngleThreshold=0.707, - HUpdate = IdealH, - densityUpdate = FSISumMassDensity, - epsTensile = 0.0, - nTensile = 4.0, - xmin = (-1e100, -1e100, -1e100), - xmax = ( 1e100, 1e100, 1e100), - ASPH = False, - RZ = False): + W, + Q = None, + slides=None, + cfl = 0.35, + surfaceForceCoefficient=0.0, + 
densityStabilizationCoefficient=0.1, + specificThermalEnergyDiffusionCoefficient=0.1, + xsphCoefficient=0.0, + interfaceMethod=HLLCInterface, + kernelAveragingMethod = NeverAverageKernels, + sumDensityNodeLists=[], + useVelocityMagnitudeForDt = False, + compatibleEnergyEvolution = True, + evolveTotalEnergy = False, + linearCorrectGradients = True, + planeStrain = False, + interfacePmin = 0.0, + interfaceNeighborAngleThreshold=0.707, + HUpdate = IdealH, + densityUpdate = FSISumMassDensity, + epsTensile = 0.0, + nTensile = 4.0, + xmin = (-1e100, -1e100, -1e100), + xmax = ( 1e100, 1e100, 1e100), + ASPH = False, + RZ = False, + smoothingScaleMethod = None): + ###################################################################### # some of these parameters are inactive and possible on there was out. # strengthInDamage and damageRelieveRubble are old switches and are not @@ -86,10 +88,11 @@ def FSISPH(dataBase, slides = eval("SlideSurface%id(dataBase,contactTypes)" % ndim) # Smoothing scale update - if ASPH: - smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) - else: - smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) + if smoothingScaleMethod is None: + if ASPH: + smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) + else: + smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) # Build the constructor arguments xmin = (ndim,) + xmin diff --git a/src/GSPH/CMakeLists.txt b/src/GSPH/CMakeLists.txt index 9b095b88e..09d04055b 100644 --- a/src/GSPH/CMakeLists.txt +++ b/src/GSPH/CMakeLists.txt @@ -4,23 +4,30 @@ set(GSPH_inst computeSPHVolume computeSumVolume computeMFMDensity + initializeGradients GenericRiemannHydro GSPHHydroBase MFMHydroBase + MFVHydroBase + Policies/MassFluxPolicy Policies/ReplaceWithRatioPolicy + Policies/MFVIncrementVelocityPolicy + Policies/MFVIncrementSpecificThermalEnergyPolicy + Policies/CompatibleMFVSpecificThermalEnergyPolicy Limiters/LimiterBase Limiters/VanLeerLimiter Limiters/SuperbeeLimiter Limiters/MinModLimiter Limiters/VanAlbaLimiter Limiters/OspreLimiter + Limiters/BarthJespersenLimiter WaveSpeeds/WaveSpeedBase WaveSpeeds/AcousticWaveSpeed WaveSpeeds/DavisWaveSpeed WaveSpeeds/EinfeldtWaveSpeed RiemannSolvers/RiemannSolverBase RiemannSolvers/HLLC - RiemannSolvers/GHLLC) + RiemannSolvers/SecondOrderArtificialViscosity) set(GSPH_sources GSPHFieldNames.cc) @@ -29,24 +36,31 @@ set(GSPH_headers computeSPHVolume.hh computeSumVolume.hh computeMFMDensity.hh + initializeGradients.hh GSPHFieldNames.hh GenericRiemannHydro.hh GSPHHydroBase.hh MFMHydroBase.hh + MFVHydroBase.hh + Policies/MassFluxPolicy.hh Policies/ReplaceWithRatioPolicy.hh + Policies/MFVIncrementVelocityPolicy.hh + Policies/MFVIncrementSpecificThermalEnergyPolicy.hh + Policies/CompatibleMFVSpecificThermalEnergyPolicy.hh Limiters/LimiterBase.hh Limiters/VanLeerLimiter.hh Limiters/SuperbeeLimiter.hh Limiters/MinModLimiter.hh Limiters/VanAlbaLimiter.hh Limiters/OspreLimiter.hh + Limiters/BarthJespersenLimiter.hh WaveSpeeds/WaveSpeedBase.hh WaveSpeeds/AcousticWaveSpeed.hh WaveSpeeds/DavisWaveSpeed.hh WaveSpeeds/EinfeldtWaveSpeed.hh RiemannSolvers/RiemannSolverBase.hh RiemannSolvers/HLLC.hh - RiemannSolvers/GHLLC.hh) + RiemannSolvers/SecondOrderArtificialViscosity.hh) add_subdirectory(Limiters) add_subdirectory(WaveSpeeds) diff --git a/src/GSPH/GSPHEvaluateDerivatives.cc b/src/GSPH/GSPHEvaluateDerivatives.cc index 33e16a1a9..3f464732e 100644 --- a/src/GSPH/GSPHEvaluateDerivatives.cc +++ b/src/GSPH/GSPHEvaluateDerivatives.cc @@ -67,6 +67,7 @@ evaluateDerivatives(const typename 
Dimension::Scalar time, // Derivative FieldLists. const auto M = derivatives.fields(HydroFieldNames::M_SPHCorrection, Tensor::zero); + const auto DrhoDx = derivatives.fields(GSPHFieldNames::densityGradient, Vector::zero); auto normalization = derivatives.fields(HydroFieldNames::normalization, 0.0); auto DxDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::position, Vector::zero); auto DrhoDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::massDensity, 0.0); @@ -83,6 +84,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto newRiemannDpDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannPressureGradient,Vector::zero); auto newRiemannDvDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); + CHECK(DrhoDx.size() == numNodeLists); CHECK(M.size() == numNodeLists); CHECK(normalization.size() == numNodeLists); CHECK(DxDt.size() == numNodeLists); @@ -139,7 +141,6 @@ evaluateDerivatives(const typename Dimension::Scalar time, const auto& vi = velocity(nodeListi, i); const auto& rhoi = massDensity(nodeListi, i); const auto& voli = volume(nodeListi, i); - //const auto& epsi = specificThermalEnergy(nodeListi, i); const auto& Pi = pressure(nodeListi, i); const auto& Hi = H(nodeListi, i); const auto& ci = soundSpeed(nodeListi, i); @@ -158,6 +159,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto& weightedNeighborSumi = weightedNeighborSum_thread(nodeListi, i); auto& massSecondMomenti = massSecondMoment_thread(nodeListi, i); auto& XSPHDeltaVi = XSPHDeltaV_thread(nodeListi,i); + const auto& gradRhoi = DrhoDx(nodeListi,i); const auto& Mi = M(nodeListi,i); @@ -169,7 +171,6 @@ evaluateDerivatives(const typename Dimension::Scalar time, const auto& vj = velocity(nodeListj, j); const auto& rhoj = massDensity(nodeListj, j); const auto& volj = volume(nodeListj, j); - //const auto& epsj = specificThermalEnergy(nodeListj, j); const auto& Pj = pressure(nodeListj, j); const auto& Hj = H(nodeListj, j); const auto& cj = soundSpeed(nodeListj, j); @@ -188,8 +189,9 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto& weightedNeighborSumj = weightedNeighborSum_thread(nodeListj, j); auto& massSecondMomentj = massSecondMoment_thread(nodeListj, j); auto& XSPHDeltaVj = XSPHDeltaV_thread(nodeListj,j); + const auto& gradRhoj = DrhoDx(nodeListj,j); const auto& Mj = M(nodeListj,j); - + // Node displacement. 
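
The new densityGradient derivative (DrhoDx, gradRhoi/gradRhoj above) is a linearly-corrected SPH estimate: accumulate sum_j V_j (rho_j - rho_i) gradW_ij and multiply by the inverse of the geometric moment M_i, which reproduces linear fields exactly. A 1-D standalone sketch of that construction, with an illustrative kernel and node spacing rather than Spheral's:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // 1-D linearly-corrected SPH gradient: grad_i = M_i * sum_j V_j (f_j - f_i) dW_ij,
    // with M_i the inverse of sum_j V_j (x_j - x_i) dW_ij.
    double dW(double dx, double h) {            // any smooth, odd kernel derivative
      return -dx * std::exp(-(dx*dx)/(h*h)) / (h*h*h);
    }

    int main() {
      const int n = 21;
      const double dxNode = 0.1, h = 0.2, V = dxNode;
      std::vector<double> x(n), rho(n);
      for (int i = 0; i < n; ++i) { x[i] = i*dxNode; rho[i] = 2.0 + 3.0*x[i]; }

      const int i = n/2;                        // an interior node
      double num = 0.0, M = 0.0;
      for (int j = 0; j < n; ++j) {
        if (j == i) continue;
        const double g = dW(x[i] - x[j], h);
        num += V * (rho[j] - rho[i]) * g;
        M   += V * (x[j]  - x[i])  * g;
      }
      std::printf("corrected grad = %.12g (expect 3)\n", num / M);
      return 0;
    }
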
const auto rij = ri - rj; const auto rhatij =rij.unitVector(); @@ -233,26 +235,28 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto gradPj = riemannDpDxj; auto gradVi = riemannDvDxi; auto gradVj = riemannDvDxj; - if (gradType==GradientType::SPHSameTimeGradient){ - gradPi = newRiemannDpDxi; - gradPj = newRiemannDpDxj; - gradVi = newRiemannDvDxi; - gradVj = newRiemannDvDxj; + if (gradType==GradientType::SPHSameTimeGradient or + gradType==GradientType::SPHUncorrectedGradient){ + gradPi = newRiemannDpDx(nodeListi,i); + gradPj = newRiemannDpDx(nodeListj,j); + gradVi = newRiemannDvDx(nodeListi,i); + gradVj = newRiemannDvDx(nodeListj,j); } - riemannSolver.interfaceState(i, j, - nodeListi, nodeListj, - ri, rj, + + riemannSolver.interfaceState(ri, rj, + Hi, Hj, rhoi, rhoj, ci, cj, Peffi, Peffj, vi, vj, + gradRhoi, gradRhoj, gradPi, gradPj, gradVi, gradVj, - Pstar, - vstar, - rhostari, - rhostarj); - + Pstar, //output + vstar, //output + rhostari, //output + rhostarj); //output + // get our basis function and interface area vectors //-------------------------------------------------------- psii = volj*Wi; @@ -266,17 +270,16 @@ evaluateDerivatives(const typename Dimension::Scalar time, // acceleration //------------------------------------------------------ const auto deltaDvDt = Pstar*(Ai+Aj); - DvDti -= deltaDvDt; - DvDtj += deltaDvDt; + DvDti -= deltaDvDt/mi; + DvDtj += deltaDvDt/mj; // energy //------------------------------------------------------ const auto deltaDepsDti = 2.0*Pstar*Ai.dot(vi-vstar); const auto deltaDepsDtj = 2.0*Pstar*Aj.dot(vstar-vj); + DepsDti += deltaDepsDti/mi; + DepsDtj += deltaDepsDtj/mj; - DepsDti += deltaDepsDti; - DepsDtj += deltaDepsDtj; - if(compatibleEnergy){ const auto invmij = 1.0/(mi*mj); pairAccelerations[kk] = deltaDvDt*invmij; @@ -376,9 +379,6 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto& weightedNeighborSumi = weightedNeighborSum(nodeListi, i); auto& massSecondMomenti = massSecondMoment(nodeListi, i); - DvDti /= mi; - DepsDti /= mi; - normi += voli*Hdeti*W0; DrhoDti = - rhoi * DvDxi.Trace(); @@ -434,23 +434,26 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, const DataBase& dataBase, const State& state, StateDerivatives& derivatives) const { - // The kernels and such. const auto& W = this->kernel(); - const auto gradType = this->gradientType(); - + const auto calcSpatialGradients = (this->gradientType() == GradientType::SPHSameTimeGradient + or this->gradientType() == GradientType::SPHUncorrectedGradient); + const auto correctSpatialGradients = (this->gradientType() == GradientType::SPHSameTimeGradient); + // The connectivity. const auto& connectivityMap = dataBase.connectivityMap(); const auto& nodeLists = connectivityMap.nodeLists(); const auto numNodeLists = nodeLists.size(); // Get the state and derivative FieldLists. 
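
interfaceState above returns the pressure and velocity of the star region between the two nodes (Pstar, vstar) along with the star-region densities. As a much simpler stand-in for the HLLC solver with limited linear reconstruction, a 1-D acoustic (linearised) Riemann solver already illustrates what those outputs represent:

    #include <cstdio>

    // One-dimensional acoustic (linearised) Riemann solver: a simplified
    // illustration of the Pstar/vstar outputs for given left/right states.
    struct Star { double Pstar, vstar; };

    Star acousticRiemann(double rhoL, double vL, double PL, double cL,
                         double rhoR, double vR, double PR, double cR) {
      const double CL = rhoL*cL, CR = rhoR*cR;   // acoustic impedances
      const double denom = CL + CR;
      return { (CR*PL + CL*PR + CL*CR*(vL - vR)) / denom,
               (CL*vL + CR*vR + PL - PR) / denom };
    }

    int main() {
      // Symmetric colliding flow: the interface is brought to rest and compressed.
      const Star s = acousticRiemann(1.0,  1.0, 1.0, 1.0,
                                     1.0, -1.0, 1.0, 1.0);
      std::printf("Pstar = %g, vstar = %g\n", s.Pstar, s.vstar);  // 2, 0
      return 0;
    }
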
+ const auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); const auto volume = state.fields(HydroFieldNames::volume, 0.0); const auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); const auto pressure = state.fields(HydroFieldNames::pressure, 0.0); const auto position = state.fields(HydroFieldNames::position, Vector::zero); const auto H = state.fields(HydroFieldNames::H, SymTensor::zero); + CHECK(massDensity.size() == numNodeLists); CHECK(volume.size() == numNodeLists); CHECK(velocity.size() == numNodeLists); CHECK(pressure.size() == numNodeLists); @@ -458,10 +461,12 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, CHECK(H.size() == numNodeLists); auto M = derivatives.fields(HydroFieldNames::M_SPHCorrection, Tensor::zero); + auto DrhoDx = derivatives.fields(GSPHFieldNames::densityGradient, Vector::zero); auto newRiemannDpDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannPressureGradient,Vector::zero); auto newRiemannDvDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); CHECK(M.size() == numNodeLists); + CHECK(DrhoDx.size() == numNodeLists); CHECK(newRiemannDpDx.size() == numNodeLists); CHECK(newRiemannDvDx.size() == numNodeLists); @@ -476,6 +481,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, typename SpheralThreads::FieldListStack threadStack; auto M_thread = M.threadCopy(threadStack); + auto DrhoDx_thread = DrhoDx.threadCopy(threadStack); auto newRiemannDpDx_thread = newRiemannDpDx.threadCopy(threadStack); auto newRiemannDvDx_thread = newRiemannDvDx.threadCopy(threadStack); @@ -487,6 +493,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, nodeListj = pairs[kk].j_list; // Get the state for node i. 
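
The threadCopy/threadStack pattern above gives each OpenMP thread private copies of the accumulation FieldLists, which are reduced back into the shared fields after the pair loop. A standalone sketch of the same idiom with plain vectors:

    #include <cstdio>
    #include <vector>

    // Per-thread scratch accumulators, merged after the loop, avoid data races
    // when many pairs contribute to the same node.
    int main() {
      const int nNodes = 1000, nPairs = 100000;
      std::vector<double> total(nNodes, 0.0);

    #pragma omp parallel
      {
        std::vector<double> local(nNodes, 0.0);   // thread-private accumulator
    #pragma omp for
        for (int k = 0; k < nPairs; ++k) {
          const int i = k % nNodes, j = (k * 7) % nNodes;
          local[i] += 1.0;
          local[j] += 1.0;
        }
    #pragma omp critical
        for (int n = 0; n < nNodes; ++n) total[n] += local[n];
      }

      double sum = 0.0;
      for (double v : total) sum += v;
      std::printf("total contributions = %g (expect %d)\n", sum, 2*nPairs);
      return 0;
    }
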
+ const auto& rhoi = massDensity(nodeListi, i); const auto& ri = position(nodeListi, i); const auto& voli = volume(nodeListi, i); const auto& Hi = H(nodeListi, i); @@ -495,8 +502,10 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, CHECK(Hdeti > 0.0); auto& Mi = M_thread(nodeListi, i); + auto& DrhoDxi = DrhoDx_thread(nodeListi, i); // Get the state for node j + const auto& rhoj = massDensity(nodeListj, j); const auto& rj = position(nodeListj, j); const auto& volj = volume(nodeListj, j); const auto& Hj = H(nodeListj, j); @@ -505,6 +514,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, CHECK(Hdetj > 0.0); auto& Mj = M_thread(nodeListj, j); + auto& DrhoDxj = DrhoDx_thread(nodeListj, j); const auto rij = ri - rj; @@ -530,12 +540,17 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, Mi -= rij.dyad(gradPsii); Mj -= rij.dyad(gradPsij); + DrhoDxi -= (rhoi - rhoj) * gradPsii; + DrhoDxj -= (rhoi - rhoj) * gradPsij; + // // based on nodal values - if (gradType == GradientType::SPHSameTimeGradient){ + if (calcSpatialGradients){ + const auto& vi = velocity(nodeListi, i); const auto& Pi = pressure(nodeListi, i); const auto& vj = velocity(nodeListj, j); const auto& Pj = pressure(nodeListj, j); + auto& newRiemannDpDxi = newRiemannDpDx_thread(nodeListi, i); auto& newRiemannDvDxi = newRiemannDvDx_thread(nodeListi, i); auto& newRiemannDpDxj = newRiemannDpDx_thread(nodeListj, j); @@ -546,6 +561,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, newRiemannDvDxi -= (vi-vj).dyad(gradPsii); newRiemannDvDxj -= (vi-vj).dyad(gradPsij); + } } // loop over pairs @@ -554,7 +570,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, } // OpenMP parallel region - // Finish up the spatial gradient calculation + // loop the nodes to finish up the spatial gradient calculation for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { const auto& nodeList = M[nodeListi]->nodeList(); const auto ni = nodeList.numInternalNodes(); @@ -562,6 +578,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, for (auto i = 0u; i < ni; ++i) { const auto numNeighborsi = connectivityMap.numNeighborsForNode(nodeListi, i); auto& Mi = M(nodeListi, i); + auto& DrhoDxi = DrhoDx(nodeListi, i); const auto Mdeti = std::abs(Mi.Determinant()); @@ -570,25 +587,28 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, Mi = ( goodM ? 
Mi.Inverse() : Tensor::one); - if (gradType == GradientType::SPHSameTimeGradient){ + DrhoDxi = Mi.Transpose()*DrhoDxi; + + if (correctSpatialGradients){ + auto& newRiemannDpDxi = newRiemannDpDx(nodeListi, i); auto& newRiemannDvDxi = newRiemannDvDx(nodeListi, i); - newRiemannDpDxi = Mi.Transpose()*newRiemannDpDxi; newRiemannDvDxi = newRiemannDvDxi*Mi; - } - } - - } + + } // if correctSpatialGradients + } // for each node + } // for each node list for (ConstBoundaryIterator boundItr = this->boundaryBegin(); boundItr != this->boundaryEnd(); ++boundItr)(*boundItr)->applyFieldListGhostBoundary(M); - if (gradType == GradientType::SPHSameTimeGradient){ + if (calcSpatialGradients){ for (ConstBoundaryIterator boundItr = this->boundaryBegin(); boundItr != this->boundaryEnd(); ++boundItr){ + (*boundItr)->applyFieldListGhostBoundary(DrhoDx); (*boundItr)->applyFieldListGhostBoundary(newRiemannDpDx); (*boundItr)->applyFieldListGhostBoundary(newRiemannDvDx); } @@ -597,6 +617,5 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, boundaryItr != this->boundaryEnd(); ++boundaryItr) (*boundaryItr)->finalizeGhostBoundary(); -} - +} // MC correction method } // spheral namespace diff --git a/src/GSPH/GSPHFieldNames.cc b/src/GSPH/GSPHFieldNames.cc index 46d5b6b83..5b7d9dbdb 100644 --- a/src/GSPH/GSPHFieldNames.cc +++ b/src/GSPH/GSPHFieldNames.cc @@ -6,9 +6,13 @@ #include "GSPHFieldNames.hh" +const std::string Spheral::GSPHFieldNames::nodalVelocity = "velocity of node"; +const std::string Spheral::GSPHFieldNames::momentum = "momentum"; +const std::string Spheral::GSPHFieldNames::thermalEnergy = "thermal energy"; const std::string Spheral::GSPHFieldNames::densityGradient = "density gradient"; const std::string Spheral::GSPHFieldNames::pressureGradient = "pressure gradient"; const std::string Spheral::GSPHFieldNames::deviatoricStressTensorGradient = "deviatoric stress tensor gradient"; const std::string Spheral::GSPHFieldNames::RiemannPressureGradient = "Riemann solvers pressure gradient"; const std::string Spheral::GSPHFieldNames::RiemannVelocityGradient = "Riemann solvers velocity gradient"; -const std::string Spheral::GSPHFieldNames::RiemannDeviatoricStressTensorGradient = "Riemann solvers deviatoric stress tensor gradient"; \ No newline at end of file +const std::string Spheral::GSPHFieldNames::RiemannDeviatoricStressTensorGradient = "Riemann solvers deviatoric stress tensor gradient"; +const std::string Spheral::GSPHFieldNames::pairMassFlux = "pairwise mass flux"; \ No newline at end of file diff --git a/src/GSPH/GSPHFieldNames.hh b/src/GSPH/GSPHFieldNames.hh index 07421567f..d87846995 100644 --- a/src/GSPH/GSPHFieldNames.hh +++ b/src/GSPH/GSPHFieldNames.hh @@ -12,12 +12,16 @@ namespace Spheral { struct GSPHFieldNames { + static const std::string nodalVelocity; + static const std::string momentum; + static const std::string thermalEnergy; static const std::string densityGradient; static const std::string pressureGradient; static const std::string deviatoricStressTensorGradient; static const std::string RiemannPressureGradient; static const std::string RiemannVelocityGradient; static const std::string RiemannDeviatoricStressTensorGradient; + static const std::string pairMassFlux; }; } diff --git a/src/GSPH/GSPHHydroBase.cc b/src/GSPH/GSPHHydroBase.cc index e4e1679eb..ee809189c 100644 --- a/src/GSPH/GSPHHydroBase.cc +++ b/src/GSPH/GSPHHydroBase.cc @@ -1,5 +1,8 @@ //---------------------------------Spheral++----------------------------------// -// GSPHHydroBase -- The Godunov SPH hydrodynamic package 
for Spheral++. +// GSPHHydroBase -- A Riemann-solver-based implementation of SPH. Compared to +// MFM/MFV this approach requires a larger neighbor set. 2.5 +// nodes per kernel extent instead of 2-2.25 for MFM/MFV but +// does perform better on certain tests (Noh implosion) // // J.M. Pearl 2021 //----------------------------------------------------------------------------// @@ -207,11 +210,8 @@ initialize(const typename Dimension::Scalar time, State& state, StateDerivatives& derivs) { TIME_BEGIN("GSPHinitialize"); - GenericRiemannHydro::initialize(time,dt,dataBase,state,derivs); - TIME_END("GSPHinitialize"); - } //------------------------------------------------------------------------------ @@ -239,9 +239,7 @@ GSPHHydroBase:: applyGhostBoundaries(State& state, StateDerivatives& derivs) { TIME_BEGIN("GSPHghostBounds"); - GenericRiemannHydro::applyGhostBoundaries(state,derivs); - TIME_END("GSPHghostBounds"); } @@ -254,9 +252,7 @@ GSPHHydroBase:: enforceBoundaries(State& state, StateDerivatives& derivs) { TIME_BEGIN("GSPHenforceBounds"); - GenericRiemannHydro::enforceBoundaries(state,derivs); - TIME_END("GSPHenforceBounds"); } diff --git a/src/GSPH/GSPHHydroBase.hh b/src/GSPH/GSPHHydroBase.hh index 964c70472..0137c768b 100644 --- a/src/GSPH/GSPHHydroBase.hh +++ b/src/GSPH/GSPHHydroBase.hh @@ -1,5 +1,8 @@ //---------------------------------Spheral++----------------------------------// -// GSPHHydroBase -- The Godunov SPH hydrodynamic package for Spheral++. +// GSPHHydroBase -- A Riemann-solver-based implementation of SPH. Compared to +// MFM/MFV this approach requires a larger neighbor set. 2.5 +// nodes per kernel extent instead of 2-2.25 for MFM/MFV but +// does perform better on certain tests (Noh implosion) // // J.M. Pearl 2021 //----------------------------------------------------------------------------// diff --git a/src/GSPH/GSPHHydros.py b/src/GSPH/GSPHHydros.py index 618871580..31166132b 100644 --- a/src/GSPH/GSPHHydros.py +++ b/src/GSPH/GSPHHydros.py @@ -3,118 +3,6 @@ from spheralDimensions import spheralDimensions dims = spheralDimensions() -#------------------------------------------------------------------------------- -# density-based GSPH factory string -#------------------------------------------------------------------------------- -GSPHHydroFactoryString = """ -class %(classname)s%(dim)s(GSPHHydroBase%(dim)s): - - def __init__(self, - dataBase, - riemannSolver, - W, - epsDiffusionCoeff = 0.0, - cfl = 0.25, - useVelocityMagnitudeForDt = False, - compatibleEnergyEvolution = True, - evolveTotalEnergy = False, - XSPH = True, - correctVelocityGradient = True, - gradientType = HydroAccelerationGradient, - densityUpdate = IntegrateDensity, - HUpdate = IdealH, - epsTensile = 0.0, - nTensile = 4.0, - xmin = Vector%(dim)s(-1e100, -1e100, -1e100), - xmax = Vector%(dim)s( 1e100, 1e100, 1e100)): - self._smoothingScaleMethod = %(smoothingScaleMethod)s%(dim)s() - GSPHHydroBase%(dim)s.__init__(self, - self._smoothingScaleMethod, - dataBase, - riemannSolver, - W, - epsDiffusionCoeff, - cfl, - useVelocityMagnitudeForDt, - compatibleEnergyEvolution, - evolveTotalEnergy, - XSPH, - correctVelocityGradient, - gradientType, - densityUpdate, - HUpdate, - epsTensile, - nTensile, - xmin, - xmax) - return -""" - -#------------------------------------------------------------------------------- -# volume-based GSPH factory string (MFM) -#------------------------------------------------------------------------------- -MFMHydroFactoryString = """ -class %(classname)s%(dim)s(MFMHydroBase%(dim)s): - - 
def __init__(self, - dataBase, - riemannSolver, - W, - epsDiffusionCoeff = 0.0, - cfl = 0.25, - useVelocityMagnitudeForDt = False, - compatibleEnergyEvolution = True, - evolveTotalEnergy = False, - XSPH = True, - correctVelocityGradient = True, - gradientType = HydroAccelerationGradient, - densityUpdate = IntegrateDensity, - HUpdate = IdealH, - epsTensile = 0.0, - nTensile = 4.0, - xmin = Vector%(dim)s(-1e100, -1e100, -1e100), - xmax = Vector%(dim)s( 1e100, 1e100, 1e100)): - self._smoothingScaleMethod = %(smoothingScaleMethod)s%(dim)s() - MFMHydroBase%(dim)s.__init__(self, - self._smoothingScaleMethod, - dataBase, - riemannSolver, - W, - epsDiffusionCoeff, - cfl, - useVelocityMagnitudeForDt, - compatibleEnergyEvolution, - evolveTotalEnergy, - XSPH, - correctVelocityGradient, - gradientType, - densityUpdate, - HUpdate, - epsTensile, - nTensile, - xmin, - xmax) - return -""" - -#------------------------------------------------------------------------------- -# Make 'em. -#------------------------------------------------------------------------------- -for dim in dims: - exec(GSPHHydroFactoryString % {"dim" : "%id" % dim, - "classname" : "GSPHHydro", - "smoothingScaleMethod" : "SPHSmoothingScale"}) - exec(GSPHHydroFactoryString % {"dim" : "%id" % dim, - "classname" : "AGSPHHydro", - "smoothingScaleMethod" : "ASPHSmoothingScale"}) - - exec(MFMHydroFactoryString % {"dim" : "%id" % dim, - "classname" : "MFMHydro", - "smoothingScaleMethod" : "SPHSmoothingScale"}) - exec(MFMHydroFactoryString % {"dim" : "%id" % dim, - "classname" : "AMFMHydro", - "smoothingScaleMethod" : "ASPHSmoothingScale"}) - #------------------------------------------------------------------------------- # GSPH convience wrapper function #------------------------------------------------------------------------------- @@ -155,10 +43,13 @@ def GSPH(dataBase, print(" which will result in fluid behaviour for those nodes.") raise RuntimeError("Cannot mix solid and fluid NodeLists.") + Constructor = eval("GSPHHydroBase%id" % ndim) + + # Smoothing scale update if ASPH: - Constructor = eval("AGSPHHydro%id" % ndim) + smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) else: - Constructor = eval("GSPHHydro%id" % ndim) + smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) if riemannSolver is None: waveSpeedMethod = eval("DavisWaveSpeed%id()" % (ndim)) @@ -170,7 +61,9 @@ def GSPH(dataBase, xmin = (ndim,) + xmin xmax = (ndim,) + xmax - kwargs = {"riemannSolver" : riemannSolver, + kwargs = {"smoothingScaleMethod" : smoothingScaleMethod, + "dataBase" : dataBase, + "riemannSolver" : riemannSolver, "W" : W, "epsDiffusionCoeff" : specificThermalEnergyDiffusionCoefficient, "dataBase" : dataBase, @@ -180,7 +73,7 @@ def GSPH(dataBase, "evolveTotalEnergy" : evolveTotalEnergy, "XSPH" : XSPH, "correctVelocityGradient" : correctVelocityGradient, - "gradientType" : gradientType, + "gradType" : gradientType, "densityUpdate" : densityUpdate, "HUpdate" : HUpdate, "epsTensile" : epsTensile, @@ -191,8 +84,10 @@ def GSPH(dataBase, # Build and return the thing. 
result = Constructor(**kwargs) - return result + result._smoothingScaleMethod = smoothingScaleMethod + return result + #------------------------------------------------------------------------------- # MFM convience wrapper function #------------------------------------------------------------------------------- @@ -233,10 +128,14 @@ def MFM(dataBase, print(" which will result in fluid behaviour for those nodes.") raise RuntimeError("Cannot mix solid and fluid NodeLists.") + Constructor = eval("MFMHydroBase%id" % ndim) + + # Smoothing scale update if ASPH: - Constructor = eval("AMFMHydro%id" % ndim) + smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) else: - Constructor = eval("MFMHydro%id" % ndim) + smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) + if riemannSolver is None: waveSpeedMethod = eval("DavisWaveSpeed%id()" % (ndim)) @@ -248,7 +147,9 @@ def MFM(dataBase, xmin = (ndim,) + xmin xmax = (ndim,) + xmax - kwargs = {"riemannSolver" : riemannSolver, + kwargs = {"smoothingScaleMethod" : smoothingScaleMethod, + "dataBase" : dataBase, + "riemannSolver" : riemannSolver, "W" : W, "epsDiffusionCoeff" : specificThermalEnergyDiffusionCoefficient, "dataBase" : dataBase, @@ -258,7 +159,7 @@ def MFM(dataBase, "evolveTotalEnergy" : evolveTotalEnergy, "XSPH" : XSPH, "correctVelocityGradient" : correctVelocityGradient, - "gradientType" : gradientType, + "gradType" : gradientType, "densityUpdate" : densityUpdate, "HUpdate" : HUpdate, "epsTensile" : epsTensile, @@ -269,5 +170,99 @@ def MFM(dataBase, # Build and return the thing. result = Constructor(**kwargs) + result._smoothingScaleMethod = smoothingScaleMethod + + return result + + + +#------------------------------------------------------------------------------- +# MFM convience wrapper function +#------------------------------------------------------------------------------- +def MFV(dataBase, + W, + riemannSolver=None, + specificThermalEnergyDiffusionCoefficient = 0.0, + cfl = 0.25, + nodeMotionCoefficient = 0.2, + nodeMotionType = NodeMotionType.Lagrangian, + gradientType = HydroAccelerationGradient, + densityUpdate = IntegrateDensity, + useVelocityMagnitudeForDt = False, + compatibleEnergyEvolution = True, + evolveTotalEnergy = False, + XSPH = False, + correctVelocityGradient = False, + HUpdate = IdealH, + epsTensile = 0.0, + nTensile = 4.0, + damageRelieveRubble = False, + negativePressureInDamage = False, + strengthInDamage = False, + xmin = (-1e100, -1e100, -1e100), + xmax = ( 1e100, 1e100, 1e100), + ASPH = False, + RZ = False): + + # for now we'll just piggy back off this enum + assert densityUpdate in (RigorousSumDensity,IntegrateDensity) + + # We use the provided DataBase to sniff out what sort of NodeLists are being + # used, and based on this determine which SPH object to build. 
+ ndim = dataBase.nDim + nfluid = dataBase.numFluidNodeLists + nsolid = dataBase.numSolidNodeLists + if nsolid > 0 and nsolid != nfluid: + print("MFM Error: you have provided both solid and fluid NodeLists, which is currently not supported.") + print(" If you want some fluids active, provide SolidNodeList without a strength option specfied,") + print(" which will result in fluid behaviour for those nodes.") + raise RuntimeError("Cannot mix solid and fluid NodeLists.") + + Constructor = eval("MFVHydroBase%id" % ndim) + + # Smoothing scale update + if ASPH: + smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) + else: + smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) + + + if riemannSolver is None: + waveSpeedMethod = eval("DavisWaveSpeed%id()" % (ndim)) + slopeLimiter = eval("VanLeerLimiter%id()" % (ndim)) + linearReconstruction=True + riemannSolver = eval("HLLC%id(slopeLimiter,waveSpeedMethod,linearReconstruction)" % (ndim)) + + # Build the constructor arguments + xmin = (ndim,) + xmin + xmax = (ndim,) + xmax + + kwargs = {"smoothingScaleMethod" : smoothingScaleMethod, + "dataBase" : dataBase, + "riemannSolver" : riemannSolver, + "W" : W, + "epsDiffusionCoeff" : specificThermalEnergyDiffusionCoefficient, + "dataBase" : dataBase, + "cfl" : cfl, + "useVelocityMagnitudeForDt" : useVelocityMagnitudeForDt, + "compatibleEnergyEvolution" : compatibleEnergyEvolution, + "evolveTotalEnergy" : evolveTotalEnergy, + "XSPH" : XSPH, + "correctVelocityGradient" : correctVelocityGradient, + "nodeMotionCoefficient" : nodeMotionCoefficient, + "nodeMotionType" : nodeMotionType, + "gradType" : gradientType, + "densityUpdate" : densityUpdate, + "HUpdate" : HUpdate, + "epsTensile" : epsTensile, + "nTensile" : nTensile, + "xmin" : eval("Vector%id(%g, %g, %g)" % xmin), + "xmax" : eval("Vector%id(%g, %g, %g)" % xmax)} + + #print(nodeMotionType) + # Build and return the thing. 
+ result = Constructor(**kwargs) + result._smoothingScaleMethod = smoothingScaleMethod + #print(result.nodeMotionType) return result diff --git a/src/GSPH/GenericRiemannHydro.cc b/src/GSPH/GenericRiemannHydro.cc index 6c9ea80c9..b072e6f04 100644 --- a/src/GSPH/GenericRiemannHydro.cc +++ b/src/GSPH/GenericRiemannHydro.cc @@ -14,6 +14,7 @@ #include "DataBase/StateDerivatives.hh" #include "DataBase/IncrementState.hh" #include "DataBase/ReplaceState.hh" +#include "DataBase/PureReplaceState.hh" #include "DataBase/IncrementBoundedState.hh" #include "DataBase/ReplaceBoundedState.hh" #include "DataBase/updateStateFields.hh" @@ -35,6 +36,7 @@ #include "GSPH/GSPHFieldNames.hh" #include "GSPH/GenericRiemannHydro.hh" #include "GSPH/computeSPHVolume.hh" +#include "GSPH/initializeGradients.hh" #include "GSPH/RiemannSolvers/RiemannSolverBase.hh" #ifdef _OPENMP @@ -50,6 +52,7 @@ using std::string; using std::pair; using std::to_string; using std::make_pair; +using std::make_shared; namespace { @@ -119,9 +122,10 @@ GenericRiemannHydro(const SmoothingScaleBase& smoothingScaleMethod, mXSPHDeltaV(FieldStorageType::CopyFields), mM(FieldStorageType::CopyFields), mDxDt(FieldStorageType::CopyFields), - mDvDt(FieldStorageType::CopyFields), - mDspecificThermalEnergyDt(FieldStorageType::CopyFields), - mDHDt(FieldStorageType::CopyFields), + mDvDt(FieldStorageType::CopyFields), // move up one layer + mDspecificThermalEnergyDt(FieldStorageType::CopyFields), // move up one layer + mDHDt(FieldStorageType::CopyFields), + mDrhoDx(FieldStorageType::CopyFields), mDvDx(FieldStorageType::CopyFields), mRiemannDpDx(FieldStorageType::CopyFields), mRiemannDvDx(FieldStorageType::CopyFields), @@ -146,6 +150,7 @@ GenericRiemannHydro(const SmoothingScaleBase& smoothingScaleMethod, mDvDt = dataBase.newFluidFieldList(Vector::zero, HydroFieldNames::hydroAcceleration); mDspecificThermalEnergyDt = dataBase.newFluidFieldList(0.0, IncrementState::prefix() + HydroFieldNames::specificThermalEnergy); mDHDt = dataBase.newFluidFieldList(SymTensor::zero, IncrementState::prefix() + HydroFieldNames::H); + mDrhoDx = dataBase.newFluidFieldList(Vector::zero,GSPHFieldNames::densityGradient); mDvDx = dataBase.newFluidFieldList(Tensor::zero, HydroFieldNames::velocityGradient); mRiemannDpDx = dataBase.newFluidFieldList(Vector::zero,GSPHFieldNames::RiemannPressureGradient); mRiemannDvDx = dataBase.newFluidFieldList(Tensor::zero,GSPHFieldNames::RiemannVelocityGradient); @@ -174,16 +179,43 @@ initializeProblemStartupDependencies(DataBase& dataBase, State& state, StateDerivatives& derivs) { - // Set the moduli. 
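
The new start-up body that follows fills ghost values for the volume, velocity, and pressure FieldLists and finalizes the boundaries before initializeGradients runs, since the gradient sums read neighbor data. A standalone sketch of that ordering, with a periodic 1-D array and a centered difference standing in for the boundary conditions and SPH sums:

    #include <cstdio>
    #include <vector>

    // Populate ghost values first, then evaluate initial gradients that read
    // neighbor data.
    int main() {
      const int n = 8;
      std::vector<double> f(n + 2);                   // one ghost cell per side
      for (int i = 1; i <= n; ++i) f[i] = double(i);  // interior values 1..8

      // "Apply ghost boundaries": periodic copy into the ghost slots.
      f[0]     = f[n];
      f[n + 1] = f[1];

      // "Initialize gradients": centered differences that touch the ghosts.
      std::vector<double> grad(n);
      for (int i = 1; i <= n; ++i) grad[i - 1] = 0.5*(f[i + 1] - f[i - 1]);

      std::printf("grad[0] = %g, grad[%d] = %g\n", grad[0], n - 1, grad[n - 1]);
      return 0;
    }
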
+ const auto& connectivityMap = dataBase.connectivityMap(); + const auto mass = dataBase.fluidMass(); + const auto massDensity = dataBase.fluidMassDensity(); + const auto position = dataBase.fluidPosition(); + const auto H = dataBase.fluidHfield(); + auto velocity = dataBase.fluidVelocity(); + updateStateFields(HydroFieldNames::pressure, state, derivs); updateStateFields(HydroFieldNames::soundSpeed, state, derivs); - // for now initialize with SPH volume to make sure things are defined - const auto mass = dataBase.fluidMass(); - const auto massDensity = dataBase.fluidMassDensity(); computeSPHVolume(mass,massDensity,mVolume); + + for (ConstBoundaryIterator boundItr = this->boundaryBegin(); + boundItr != this->boundaryEnd(); + ++boundItr){ + (*boundItr)->applyFieldListGhostBoundary(mVolume); + (*boundItr)->applyFieldListGhostBoundary(velocity); + (*boundItr)->applyFieldListGhostBoundary(mPressure); + } + for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr) (*boundaryItr)->finalizeGhostBoundary(); + + initializeGradients(connectivityMap, + this->kernel(), + position, + H, + mVolume, + mPressure, + velocity, + mM, + mRiemannDpDx, + mRiemannDvDx); + } + //------------------------------------------------------------------------------ // Register the state we need/are going to evolve. //------------------------------------------------------------------------------ @@ -227,38 +259,42 @@ registerState(DataBase& dataBase, VERIFY2(false, "SPH ERROR: Unknown Hevolution option "); } } + + auto positionPolicy = make_shared>(); + auto pressurePolicy = make_shared>(); + auto csPolicy = make_shared>(); + auto pressureGradientPolicy = make_shared>(); + auto velocityGradientPolicy = make_shared>(); + auto velocityPolicy = make_policy>({HydroFieldNames::position,HydroFieldNames::specificThermalEnergy},true); // normal state variables state.enroll(mTimeStepMask); state.enroll(mVolume); state.enroll(mass); state.enroll(massDensity); - state.enroll(position, std::make_shared>()); - state.enroll(mPressure, std::make_shared>()); - state.enroll(mSoundSpeed, std::make_shared>()); - state.enroll(mRiemannDpDx); - state.enroll(mRiemannDvDx); + state.enroll(position, positionPolicy); + state.enroll(mPressure, pressurePolicy); + state.enroll(mSoundSpeed, csPolicy); + state.enroll(velocity, velocityPolicy); + + if (mRiemannSolver.linearReconstruction()){ + state.enroll(mRiemannDpDx, pressureGradientPolicy); + state.enroll(mRiemannDvDx, velocityGradientPolicy); + }else{ + state.enroll(mRiemannDpDx); + state.enroll(mRiemannDvDx); + } // conditional for energy method if (mCompatibleEnergyEvolution) { - - state.enroll(specificThermalEnergy, std::make_shared>(dataBase)); - state.enroll(velocity, make_policy>({HydroFieldNames::position, - HydroFieldNames::specificThermalEnergy}, - true)); - } else if (mEvolveTotalEnergy) { - - state.enroll(specificThermalEnergy, std::make_shared>()); - state.enroll(velocity, make_policy>({HydroFieldNames::position, - HydroFieldNames::specificThermalEnergy}, - true)); - + auto thermalEnergyPolicy = make_shared>(dataBase); + state.enroll(specificThermalEnergy, thermalEnergyPolicy); + }else if (mEvolveTotalEnergy) { + auto thermalEnergyPolicy = make_shared>(); + state.enroll(specificThermalEnergy, thermalEnergyPolicy); } else { - - state.enroll(specificThermalEnergy, std::make_shared>()); - state.enroll(velocity, make_policy>({HydroFieldNames::position}, - true)); - + auto thermalEnergyPolicy = make_shared>(); + 
state.enroll(specificThermalEnergy, thermalEnergyPolicy); } } @@ -285,6 +321,7 @@ registerDerivatives(DataBase& dataBase, dataBase.resizeFluidFieldList(mDspecificThermalEnergyDt, 0.0, IncrementState::prefix() + HydroFieldNames::specificThermalEnergy, false); dataBase.resizeFluidFieldList(mDHDt, SymTensor::zero, IncrementState::prefix() + HydroFieldNames::H, false); dataBase.resizeFluidFieldList(mDvDx, Tensor::zero, HydroFieldNames::velocityGradient, false); + dataBase.resizeFluidFieldList(mDrhoDx, Vector::zero, GSPHFieldNames::densityGradient, false); dataBase.resizeFluidFieldList(mM, Tensor::zero, HydroFieldNames::M_SPHCorrection, false); // Check if someone already registered DxDt. @@ -294,6 +331,7 @@ registerDerivatives(DataBase& dataBase, } // Check that no-one else is trying to control the hydro vote for DvDt. CHECK(not derivs.registered(mDvDt)); + derivs.enroll(mDrhoDx); derivs.enroll(mNewRiemannDpDx); derivs.enroll(mNewRiemannDvDx); derivs.enroll(mDvDt); @@ -488,51 +526,6 @@ initialize(const typename Dimension::Scalar time, const DataBase& dataBase, State& state, StateDerivatives& derivs) { - - auto& riemannSolver = this->riemannSolver(); - // const auto& W = this->kernel(); - - // riemannSolver.initialize(dataBase, - // state, - // derivs, - // this->boundaryBegin(), - // this->boundaryEnd(), - // time, - // dt, - // W); - - if(riemannSolver.linearReconstruction()){ - const auto& connectivityMap = dataBase.connectivityMap(); - const auto& nodeLists = connectivityMap.nodeLists(); - const auto numNodeLists = nodeLists.size(); - - // copy from previous time step - for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { - const auto& nodeList = nodeLists[nodeListi]; - const auto ni = nodeList->numInternalNodes(); - #pragma omp parallel for - for (auto i = 0u; i < ni; ++i) { - const auto DvDxi = mNewRiemannDvDx(nodeListi,i); - const auto DpDxi = mNewRiemannDpDx(nodeListi,i); - - mRiemannDvDx(nodeListi,i) = DvDxi; - mRiemannDpDx(nodeListi,i) = DpDxi; - - } - } - - for (auto boundItr =this->boundaryBegin(); - boundItr != this->boundaryEnd(); - ++boundItr) { - (*boundItr)->applyFieldListGhostBoundary(mRiemannDvDx); - (*boundItr)->applyFieldListGhostBoundary(mRiemannDpDx); - } - - for (auto boundItr = this->boundaryBegin(); - boundItr != this->boundaryEnd(); - ++boundItr) (*boundItr)->finalizeGhostBoundary(); - - } // if LinearReconstruction } @@ -568,8 +561,8 @@ template void GenericRiemannHydro:: applyGhostBoundaries(State& state, - StateDerivatives& /*derivs*/) { - // Apply boundary conditions to the basic fluid state Fields. + StateDerivatives& derivs) { + auto volume = state.fields(HydroFieldNames::volume, 0.0); auto mass = state.fields(HydroFieldNames::mass, 0.0); auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); @@ -577,8 +570,6 @@ applyGhostBoundaries(State& state, auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); auto pressure = state.fields(HydroFieldNames::pressure, 0.0); auto soundSpeed = state.fields(HydroFieldNames::soundSpeed, 0.0); - - // our store vars in the riemann solver auto DpDx = state.fields(GSPHFieldNames::RiemannPressureGradient,Vector::zero); auto DvDx = state.fields(GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); @@ -605,9 +596,8 @@ template void GenericRiemannHydro:: enforceBoundaries(State& state, - StateDerivatives& /*derivs*/) { + StateDerivatives& derivs) { - // Enforce boundary conditions on the fluid state Fields. 
auto volume = state.fields(HydroFieldNames::volume, 0.0); auto mass = state.fields(HydroFieldNames::mass, 0.0); auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); @@ -615,12 +605,9 @@ enforceBoundaries(State& state, auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); auto pressure = state.fields(HydroFieldNames::pressure, 0.0); auto soundSpeed = state.fields(HydroFieldNames::soundSpeed, 0.0); - - // our store vars in the riemann solver auto DpDx = state.fields(GSPHFieldNames::RiemannPressureGradient,Vector::zero); auto DvDx = state.fields(GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); - for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); boundaryItr != this->boundaryEnd(); ++boundaryItr) { @@ -646,6 +633,7 @@ GenericRiemannHydro:: dumpState(FileIO& file, const string& pathName) const { file.write(mTimeStepMask, pathName + "/timeStepMask"); + file.write(mVolume, pathName + "/volume"); file.write(mPressure, pathName + "/pressure"); file.write(mSoundSpeed, pathName + "/soundSpeed"); @@ -682,6 +670,7 @@ GenericRiemannHydro:: restoreState(const FileIO& file, const string& pathName) { file.read(mTimeStepMask, pathName + "/timeStepMask"); + file.read(mVolume, pathName + "/volume"); file.read(mPressure, pathName + "/pressure"); file.read(mSoundSpeed, pathName + "/soundSpeed"); diff --git a/src/GSPH/GenericRiemannHydro.hh b/src/GSPH/GenericRiemannHydro.hh index 971370eba..e4bd66862 100644 --- a/src/GSPH/GenericRiemannHydro.hh +++ b/src/GSPH/GenericRiemannHydro.hh @@ -1,6 +1,6 @@ //---------------------------------Spheral++----------------------------------// // GenericRiemannHydro -- pure virtual class for hydros using a Riemann -// solver +// solver // // J.M. Pearl 2022 //----------------------------------------------------------------------------// @@ -20,7 +20,15 @@ enum class GradientType { HydroAccelerationGradient = 1, SPHGradient = 2, MixedMethodGradient = 3, - SPHSameTimeGradient = 4 + SPHSameTimeGradient = 4, + SPHUncorrectedGradient = 5, + NoGradient = 6 +}; + +enum class GSPHEvolutionType { + IdealH = 0, + IntegrateH = 1, + constantNeighborCount = 2 }; template class State; @@ -206,6 +214,7 @@ public: const std::vector& pairAccelerations() const; const std::vector& pairDepsDt() const; + const FieldList& DrhoDx() const; const FieldList& riemannDpDx() const; const FieldList& riemannDvDx() const; const FieldList& newRiemannDpDx() const; @@ -232,7 +241,7 @@ private: MassDensityType mDensityUpdate; HEvolutionType mHEvolution; - // A bunch of switches. + // A bunch of switches. 
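
The extended GradientType enum above feeds the two booleans used in computeMCorrection: whether same-time SPH gradients are evaluated at all, and whether the linear-correction tensor M is applied to them. A standalone sketch of that mapping (enum values above this hunk are omitted here):

    #include <cstdio>

    enum class GradientType {
      HydroAccelerationGradient = 1,
      SPHGradient = 2,
      MixedMethodGradient = 3,
      SPHSameTimeGradient = 4,
      SPHUncorrectedGradient = 5,
      NoGradient = 6
    };

    int main() {
      const GradientType gradType = GradientType::SPHUncorrectedGradient;

      // SPHUncorrectedGradient computes the same-time SPH gradients but skips
      // the M correction; SPHSameTimeGradient does both.
      const bool calcSpatialGradients =
          (gradType == GradientType::SPHSameTimeGradient ||
           gradType == GradientType::SPHUncorrectedGradient);
      const bool correctSpatialGradients =
          (gradType == GradientType::SPHSameTimeGradient);

      std::printf("compute: %d, apply M correction: %d\n",
                  calcSpatialGradients, correctSpatialGradients);
      return 0;
    }
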
bool mCompatibleEnergyEvolution; bool mEvolveTotalEnergy; bool mXSPH; @@ -266,6 +275,7 @@ private: FieldList mDspecificThermalEnergyDt; FieldList mDHDt; + FieldList mDrhoDx; FieldList mDvDx; FieldList mRiemannDpDx; FieldList mRiemannDvDx; diff --git a/src/GSPH/GenericRiemannHydroInline.hh b/src/GSPH/GenericRiemannHydroInline.hh index ab296c78f..e3264732a 100644 --- a/src/GSPH/GenericRiemannHydroInline.hh +++ b/src/GSPH/GenericRiemannHydroInline.hh @@ -643,6 +643,14 @@ specificThermalEnergyDiffusionCoefficient() const { } +template +inline +const FieldList& +GenericRiemannHydro:: +DrhoDx() const { + return mDrhoDx; +} + template inline const FieldList& diff --git a/src/GSPH/Limiters/BarthJespersenLimiter.cc b/src/GSPH/Limiters/BarthJespersenLimiter.cc new file mode 100644 index 000000000..4c6259af5 --- /dev/null +++ b/src/GSPH/Limiters/BarthJespersenLimiter.cc @@ -0,0 +1,40 @@ +//---------------------------------Spheral++----------------------------------// +// BarthJespersenLimiter +// J. Barth, D. C. Jespersen, The design and application of upwind schemes +// on unstructured meshes, in: 27th Aerospace Sciences Meetings, AIAA Paper +// 89-0366, Reno, NV, 1989. doi:10.2514/6.1989-366 +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// + +#include "BarthJespersenLimiter.hh" + +namespace Spheral { + +//------------------------------------------------------------------------------ +// Constructor +//------------------------------------------------------------------------------ +template +BarthJespersenLimiter:: +BarthJespersenLimiter(): + LimiterBase(true,true){ +} + +//------------------------------------------------------------------------------ +// Destructor +//------------------------------------------------------------------------------ +template +BarthJespersenLimiter:: +~BarthJespersenLimiter(){} + +//------------------------------------------------------------------------------ +// slope limiter +//------------------------------------------------------------------------------ +template +typename Dimension::Scalar +BarthJespersenLimiter:: +fluxLimiter(const typename Dimension::Scalar x) const { + return std::min(std::min(0.5*(x+1),2.0),2.0*x) ; +} + +} \ No newline at end of file diff --git a/src/GSPH/Limiters/BarthJespersenLimiter.hh b/src/GSPH/Limiters/BarthJespersenLimiter.hh new file mode 100644 index 000000000..ed4ea0f39 --- /dev/null +++ b/src/GSPH/Limiters/BarthJespersenLimiter.hh @@ -0,0 +1,44 @@ +//---------------------------------Spheral++----------------------------------// +// BarthJespersenLimiter +// J. Barth, D. C. Jespersen, The design and application of upwind schemes +// on unstructured meshes, in: 27th Aerospace Sciences Meetings, AIAA Paper +// 89-0366, Reno, NV, 1989. doi:10.2514/6.1989-366 +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// + +#ifndef __Spheral_BarthJespersenLimiter_hh__ +#define __Spheral_BarthJespersenLimiter_hh__ + +#include "LimiterBase.hh" + +namespace Spheral { + +template +class BarthJespersenLimiter : public LimiterBase { + +public: + + typedef typename Dimension::Scalar Scalar; + + BarthJespersenLimiter(); + + ~BarthJespersenLimiter(); + + virtual + Scalar fluxLimiter(const Scalar) const override; + +}; + + +} + +#else + +// Forward declaration. 
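
The limiter added above is used in flux-limiter form, phi(r) = min(min((r + 1)/2, 2), 2r), where r is the ratio of consecutive slopes; phi(1) = 1 leaves smooth data untouched and the value is capped at 2. A standalone evaluation at a few positive slope ratios:

    #include <algorithm>
    #include <cstdio>

    // Flux-limiter form of the Barth-Jespersen-style limiter from the diff above.
    double barthJespersen(double r) {
      return std::min(std::min(0.5*(r + 1.0), 2.0), 2.0*r);
    }

    int main() {
      for (double r : {0.5, 1.0, 2.0, 4.0}) {
        std::printf("phi(%g) = %g\n", r, barthJespersen(r));  // 0.75 1 1.5 2
      }
      return 0;
    }
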
+namespace Spheral { + template class BarthJespersenLimiter; +} + +#endif + diff --git a/src/GSPH/Limiters/BarthJespersenLimiterInst.cc.py b/src/GSPH/Limiters/BarthJespersenLimiterInst.cc.py new file mode 100644 index 000000000..b7ec5c8d8 --- /dev/null +++ b/src/GSPH/Limiters/BarthJespersenLimiterInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "Geometry/Dimension.hh" +#include "GSPH/Limiters/BarthJespersenLimiter.cc" + +namespace Spheral { + template class BarthJespersenLimiter >; +} +""" diff --git a/src/GSPH/MFMEvaluateDerivatives.cc b/src/GSPH/MFMEvaluateDerivatives.cc index 75a5846a2..4e5098708 100644 --- a/src/GSPH/MFMEvaluateDerivatives.cc +++ b/src/GSPH/MFMEvaluateDerivatives.cc @@ -66,6 +66,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, // Derivative FieldLists. const auto M = derivatives.fields(HydroFieldNames::M_SPHCorrection, Tensor::zero); + const auto DrhoDx = derivatives.fields(GSPHFieldNames::densityGradient, Vector::zero); auto normalization = derivatives.fields(HydroFieldNames::normalization, 0.0); auto DxDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::position, Vector::zero); auto DvolDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::volume, 0.0); @@ -82,6 +83,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto newRiemannDpDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannPressureGradient,Vector::zero); auto newRiemannDvDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); + CHECK(DrhoDx.size() == numNodeLists); CHECK(M.size() == numNodeLists); CHECK(normalization.size() == numNodeLists); CHECK(DxDt.size() == numNodeLists); @@ -158,6 +160,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto& massSecondMomenti = massSecondMoment_thread(nodeListi, i); auto& XSPHDeltaVi = XSPHDeltaV_thread(nodeListi,i); const auto& Mi = M(nodeListi,i); + const auto& gradRhoi = DrhoDx(nodeListi,i); // Get the state for node j @@ -188,10 +191,12 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto& massSecondMomentj = massSecondMoment_thread(nodeListj, j); auto& XSPHDeltaVj = XSPHDeltaV_thread(nodeListj,j); const auto& Mj = M(nodeListj,j); + const auto& gradRhoj = DrhoDx(nodeListj,j); // Node displacement. const auto rij = ri - rj; const auto rhatij =rij.unitVector(); + //const auto rMagij = rij.magnitude2(); const auto vij = vi - vj; const auto etai = Hi*rij; const auto etaj = Hj*rij; @@ -218,7 +223,8 @@ evaluateDerivatives(const typename Dimension::Scalar time, weightedNeighborSumj += std::abs(gWj); massSecondMomenti += gradWi.magnitude2()*thpt; massSecondMomentj += gradWj.magnitude2()*thpt; - + //massSecondMomenti -= voli*rij.selfdyad()*gWi/rMagij;//.magnitude2()*thpt; + //massSecondMomentj -= volj*rij.selfdyad()*gWj/rMagij;//.magnitude2()*thpt; // Determine an effective pressure including a term to fight the tensile instability. 
//const auto fij = epsTensile*pow(Wi/(Hdeti*WnPerh), nTensile); const auto fij = epsTensile*FastMath::pow4(Wi/(Hdeti*WnPerh)); @@ -233,25 +239,26 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto gradPj = riemannDpDxj; auto gradVi = riemannDvDxi; auto gradVj = riemannDvDxj; - if (gradType==GradientType::SPHSameTimeGradient){ - gradPi = newRiemannDpDxi; - gradPj = newRiemannDpDxj; - gradVi = newRiemannDvDxi; - gradVj = newRiemannDvDxj; + if (gradType==GradientType::SPHSameTimeGradient or + gradType==GradientType::SPHUncorrectedGradient){ + gradPi = newRiemannDpDx(nodeListi,i); + gradPj = newRiemannDpDx(nodeListj,j); + gradVi = newRiemannDvDx(nodeListi,i); + gradVj = newRiemannDvDx(nodeListj,j); } - riemannSolver.interfaceState(i, j, - nodeListi, nodeListj, - ri, rj, + riemannSolver.interfaceState(ri, rj, + Hi, Hj, rhoi, rhoj, ci, cj, Peffi, Peffj, vi, vj, + gradRhoi, gradRhoj, gradPi, gradPj, gradVi, gradVj, - Pstar, - vstar, - rhostari, - rhostarj); + Pstar, //output + vstar, //output + rhostari, //output + rhostarj); //output // get our basis function and interface area vectors //-------------------------------------------------------- @@ -265,16 +272,16 @@ evaluateDerivatives(const typename Dimension::Scalar time, // acceleration //------------------------------------------------------ const auto deltaDvDt = Pstar*Astar; - DvDti -= deltaDvDt; - DvDtj += deltaDvDt; + DvDti -= deltaDvDt/mi; + DvDtj += deltaDvDt/mj; // energy //------------------------------------------------------ const auto deltaDepsDti = Pstar*Astar.dot(vi-vstar); const auto deltaDepsDtj = Pstar*Astar.dot(vstar-vj); - DepsDti += deltaDepsDti; - DepsDtj += deltaDepsDtj; + DepsDti += deltaDepsDti/mi; + DepsDtj += deltaDepsDtj/mj; if(compatibleEnergy){ const auto invmij = 1.0/(mi*mj); @@ -345,7 +352,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, const auto hmax = nodeList.hmax(); const auto hminratio = nodeList.hminratio(); const auto nPerh = nodeList.nodesPerSmoothingScale(); - + //const auto kernelExtent = nodeList.neighbor().kernelExtent(); const auto ni = nodeList.numInternalNodes(); #pragma omp parallel for for (auto i = 0u; i < ni; ++i) { @@ -373,9 +380,6 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto& weightedNeighborSumi = weightedNeighborSum(nodeListi, i); auto& massSecondMomenti = massSecondMoment(nodeListi, i); - DvDti /= mi; - DepsDti /= mi; - normi += voli*Hdeti*W0; DvolDti = voli * DvDxi.Trace() ; @@ -401,6 +405,7 @@ evaluateDerivatives(const typename Dimension::Scalar time, hmax, hminratio, nPerh); + Hideali = smoothingScale.newSmoothingScale(Hi, ri, weightedNeighborSumi, @@ -431,9 +436,11 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, const State& state, StateDerivatives& derivatives) const { + const auto calcSpatialGradients = (this->gradientType() == GradientType::SPHSameTimeGradient + or this->gradientType() == GradientType::SPHUncorrectedGradient); + const auto correctSpatialGradients = (this->gradientType() == GradientType::SPHSameTimeGradient); // The kernels and such. const auto& W = this->kernel(); - const auto gradType = this->gradientType(); // The connectivity. const auto& connectivityMap = dataBase.connectivityMap(); @@ -441,11 +448,14 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, const auto numNodeLists = nodeLists.size(); // Get the state and derivative FieldLists. 
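
In the MFM pair loop above the pairwise impulse is now divided by each node's mass inside the loop (per-pair acceleration and specific-energy increments) rather than once per node afterwards; either way, equal-and-opposite pair forces leave the total momentum unchanged. A 1-D standalone sketch of that bookkeeping:

    #include <cstdio>
    #include <vector>

    // Pairwise force F applied as acceleration increments -F/m_i and +F/m_j:
    // summing m_i * DvDt_i over all nodes still gives zero.
    int main() {
      const std::vector<double> m{1.0, 2.0, 4.0};
      std::vector<double> DvDt(3, 0.0);

      // (i, j, F) pairs, 1-D for brevity.
      const double pairs[][3] = {{0, 1, 0.7}, {1, 2, -0.3}, {0, 2, 1.1}};
      for (const auto& p : pairs) {
        const int i = int(p[0]), j = int(p[1]);
        const double F = p[2];
        DvDt[i] -= F/m[i];
        DvDt[j] += F/m[j];
      }

      double dMomentum = 0.0;
      for (int n = 0; n < 3; ++n) dMomentum += m[n]*DvDt[n];
      std::printf("total momentum change = %g\n", dMomentum);  // 0
      return 0;
    }
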
+ const auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); const auto volume = state.fields(HydroFieldNames::volume, 0.0); const auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); const auto pressure = state.fields(HydroFieldNames::pressure, 0.0); const auto position = state.fields(HydroFieldNames::position, Vector::zero); const auto H = state.fields(HydroFieldNames::H, SymTensor::zero); + + CHECK(massDensity.size() == numNodeLists); CHECK(volume.size() == numNodeLists); CHECK(velocity.size() == numNodeLists); CHECK(pressure.size() == numNodeLists); @@ -453,10 +463,12 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, CHECK(H.size() == numNodeLists); auto M = derivatives.fields(HydroFieldNames::M_SPHCorrection, Tensor::zero); + auto DrhoDx = derivatives.fields(GSPHFieldNames::densityGradient, Vector::zero); auto newRiemannDpDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannPressureGradient,Vector::zero); auto newRiemannDvDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); CHECK(M.size() == numNodeLists); + CHECK(DrhoDx.size() == numNodeLists); CHECK(newRiemannDpDx.size() == numNodeLists); CHECK(newRiemannDvDx.size() == numNodeLists); @@ -471,6 +483,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, typename SpheralThreads::FieldListStack threadStack; auto M_thread = M.threadCopy(threadStack); + auto DrhoDx_thread = DrhoDx.threadCopy(threadStack); auto newRiemannDpDx_thread = newRiemannDpDx.threadCopy(threadStack); auto newRiemannDvDx_thread = newRiemannDvDx.threadCopy(threadStack); @@ -482,6 +495,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, nodeListj = pairs[kk].j_list; // Get the state for node i. + const auto& rhoi = massDensity(nodeListi, i); const auto& ri = position(nodeListi, i); const auto& voli = volume(nodeListi, i); const auto& Hi = H(nodeListi, i); @@ -489,9 +503,11 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, CHECK(voli > 0.0); CHECK(Hdeti > 0.0); + auto& DrhoDxi = DrhoDx_thread(nodeListi, i); auto& Mi = M_thread(nodeListi, i); // Get the state for node j + const auto& rhoj = massDensity(nodeListj, j); const auto& rj = position(nodeListj, j); const auto& volj = volume(nodeListj, j); const auto& Hj = H(nodeListj, j); @@ -499,6 +515,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, CHECK(volj > 0.0); CHECK(Hdetj > 0.0); + auto& DrhoDxj = DrhoDx_thread(nodeListj, j); auto& Mj = M_thread(nodeListj, j); const auto rij = ri - rj; @@ -524,12 +541,17 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, Mi -= rij.dyad(gradPsii); Mj -= rij.dyad(gradPsij); + DrhoDxi -= (rhoi - rhoj) * gradPsii; + DrhoDxj -= (rhoi - rhoj) * gradPsij; + // // based on nodal values - if (gradType == GradientType::SPHSameTimeGradient){ + if (calcSpatialGradients){ + const auto& vi = velocity(nodeListi, i); const auto& Pi = pressure(nodeListi, i); const auto& vj = velocity(nodeListj, j); const auto& Pj = pressure(nodeListj, j); + auto& newRiemannDpDxi = newRiemannDpDx_thread(nodeListi, i); auto& newRiemannDvDxi = newRiemannDvDx_thread(nodeListi, i); auto& newRiemannDpDxj = newRiemannDpDx_thread(nodeListj, j); @@ -540,6 +562,7 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, newRiemannDvDxi -= (vi-vj).dyad(gradPsii); newRiemannDvDxj -= (vi-vj).dyad(gradPsij); + } } // loop over pairs @@ -564,12 +587,17 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, Mi = ( goodM ? 
Mi.Inverse() : Tensor::one); - if (gradType == GradientType::SPHSameTimeGradient){ + auto& DrhoDxi = DrhoDx(nodeListi, i); + DrhoDxi = Mi.Transpose()*DrhoDxi; + + if (correctSpatialGradients){ + auto& newRiemannDpDxi = newRiemannDpDx(nodeListi, i); auto& newRiemannDvDxi = newRiemannDvDx(nodeListi, i); newRiemannDpDxi = Mi.Transpose()*newRiemannDpDxi; newRiemannDvDxi = newRiemannDvDxi*Mi; + } } @@ -579,10 +607,11 @@ computeMCorrection(const typename Dimension::Scalar /*time*/, boundItr != this->boundaryEnd(); ++boundItr)(*boundItr)->applyFieldListGhostBoundary(M); - if (gradType == GradientType::SPHSameTimeGradient){ + if (calcSpatialGradients){ for (ConstBoundaryIterator boundItr = this->boundaryBegin(); boundItr != this->boundaryEnd(); ++boundItr){ + (*boundItr)->applyFieldListGhostBoundary(DrhoDx); (*boundItr)->applyFieldListGhostBoundary(newRiemannDpDx); (*boundItr)->applyFieldListGhostBoundary(newRiemannDvDx); } diff --git a/src/GSPH/MFMHydroBase.cc b/src/GSPH/MFMHydroBase.cc index ea00b8748..6b540aec3 100644 --- a/src/GSPH/MFMHydroBase.cc +++ b/src/GSPH/MFMHydroBase.cc @@ -87,7 +87,6 @@ MFMHydroBase(const SmoothingScaleBase& smoothingScaleMethod, mDvolumeDt = dataBase.newFluidFieldList(0.0, IncrementState::prefix() + HydroFieldNames::volume); - } //------------------------------------------------------------------------------ @@ -198,7 +197,7 @@ initialize(const typename Dimension::Scalar time, const DataBase& dataBase, State& state, StateDerivatives& derivs) { - GenericRiemannHydro::initialize(time,dt,dataBase,state,derivs); + GenericRiemannHydro::initialize(time,dt,dataBase,state,derivs); } //------------------------------------------------------------------------------ diff --git a/src/GSPH/MFVEvaluateDerivatives.cc b/src/GSPH/MFVEvaluateDerivatives.cc new file mode 100644 index 000000000..f2a5efc6a --- /dev/null +++ b/src/GSPH/MFVEvaluateDerivatives.cc @@ -0,0 +1,770 @@ +namespace Spheral { + +template +void +MFVHydroBase:: +evaluateDerivatives(const typename Dimension::Scalar time, + const typename Dimension::Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivatives) const { + this->firstDerivativesLoop(time,dt,dataBase,state,derivatives); + this->secondDerivativesLoop(time,dt,dataBase,state,derivatives); + //this->setH(time,dt,dataBase,state,derivatves) +} +//------------------------------------------------------------------------------ +// Determine the principle derivatives. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +secondDerivativesLoop(const typename Dimension::Scalar time, + const typename Dimension::Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivatives) const { + + const auto& riemannSolver = this->riemannSolver(); + const auto& smoothingScale = this->smoothingScaleMethod(); + + // A few useful constants we'll use in the following loop. + const auto tiny = std::numeric_limits::epsilon(); + const auto epsTensile = this->epsilonTensile(); + const auto compatibleEnergy = this->compatibleEnergyEvolution(); + //const auto totalEnergy = this->evolveTotalEnergy(); + const auto gradType = this->gradientType(); + //const auto correctVelocityGradient = this->correctVelocityGradient(); + + // The connectivity. 
+ const auto& connectivityMap = dataBase.connectivityMap(); + const auto& nodeLists = connectivityMap.nodeLists(); + const auto& pairs = connectivityMap.nodePairList(); + const auto npairs = pairs.size(); + const auto numNodeLists = nodeLists.size(); + const auto nPerh = nodeLists[0]->nodesPerSmoothingScale(); + + // kernel + const auto& W = this->kernel(); + const auto WnPerh = W(1.0/nPerh, 1.0); + //const auto W0 = W(0.0, 1.0); + + // Get the state and derivative FieldLists. + // State FieldLists. + const auto mass = state.fields(HydroFieldNames::mass, 0.0); + const auto position = state.fields(HydroFieldNames::position, Vector::zero); + const auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); + //const auto nodalVelocity = state.fields(GSPHFieldNames::nodalVelocity, Vector::zero); + const auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); + const auto volume = state.fields(HydroFieldNames::volume, 0.0); + const auto specificThermalEnergy = state.fields(HydroFieldNames::specificThermalEnergy, 0.0); + const auto H = state.fields(HydroFieldNames::H, SymTensor::zero); + const auto pressure = state.fields(HydroFieldNames::pressure, 0.0); + const auto soundSpeed = state.fields(HydroFieldNames::soundSpeed, 0.0); + const auto riemannDpDx = state.fields(GSPHFieldNames::RiemannPressureGradient,Vector::zero); + const auto riemannDvDx = state.fields(GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); + + //CHECK(nodalVelocity.size() == numNodeLists); + CHECK(mass.size() == numNodeLists); + CHECK(position.size() == numNodeLists); + CHECK(velocity.size() == numNodeLists); + CHECK(massDensity.size() == numNodeLists); + CHECK(volume.size() == numNodeLists); + CHECK(specificThermalEnergy.size() == numNodeLists); + CHECK(H.size() == numNodeLists); + CHECK(pressure.size() == numNodeLists); + CHECK(soundSpeed.size() == numNodeLists); + CHECK(riemannDpDx.size() == numNodeLists); + CHECK(riemannDvDx.size() == numNodeLists); + + // Derivative FieldLists. 
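+  // MFV accumulates rates of the conserved node quantities -- mass (DmDt), momentum (DpDt),
+  // and thermal energy (DEDt) -- alongside the usual position, volume, H, and gradient derivatives.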
+ const auto M = derivatives.fields(HydroFieldNames::M_SPHCorrection, Tensor::zero); + const auto DrhoDx = derivatives.fields(GSPHFieldNames::densityGradient, Vector::zero); + auto normalization = derivatives.fields(HydroFieldNames::normalization, 0.0); + auto DxDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::position, Vector::zero); + auto DvolDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::volume, 0.0); + auto DmDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::mass, 0.0); + auto DEDt = derivatives.fields(IncrementState::prefix() + GSPHFieldNames::thermalEnergy, 0.0); + auto DpDt = derivatives.fields(IncrementState::prefix() + GSPHFieldNames::momentum, Vector::zero); + auto DvDx = derivatives.fields(HydroFieldNames::velocityGradient, Tensor::zero); + auto DHDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::H, SymTensor::zero); + auto Hideal = derivatives.fields(ReplaceBoundedState::prefix() + HydroFieldNames::H, SymTensor::zero); + auto XSPHDeltaV = derivatives.fields(HydroFieldNames::XSPHDeltaV, Vector::zero); + auto weightedNeighborSum = derivatives.fields(HydroFieldNames::weightedNeighborSum, 0.0); + auto massSecondMoment = derivatives.fields(HydroFieldNames::massSecondMoment, SymTensor::zero); + //auto HStretchTensor = derivatives.fields("HStretchTensor", SymTensor::zero); + auto newRiemannDpDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannPressureGradient,Vector::zero); + auto newRiemannDvDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); + auto& pairAccelerations = derivatives.getAny(HydroFieldNames::pairAccelerations, vector()); + auto& pairDepsDt = derivatives.getAny(HydroFieldNames::pairWork, vector()); + auto& pairMassFlux = derivatives.getAny(GSPHFieldNames::pairMassFlux, vector()); + + CHECK(DrhoDx.size() == numNodeLists); + CHECK(M.size() == numNodeLists); + CHECK(normalization.size() == numNodeLists); + CHECK(DxDt.size() == numNodeLists); + CHECK(DvolDt.size() == numNodeLists); + CHECK(DEDt.size() == numNodeLists); + CHECK(DpDt.size() == numNodeLists); + CHECK(DvDx.size() == numNodeLists); + CHECK(DHDt.size() == numNodeLists); + CHECK(Hideal.size() == numNodeLists); + //CHECK(XSPHDeltaV.size() == numNodeLists); + CHECK(weightedNeighborSum.size() == numNodeLists); + CHECK(massSecondMoment.size() == numNodeLists); + //CHECK(HStretchTensor.size() == numNodeLists); + CHECK(newRiemannDpDx.size() == numNodeLists); + CHECK(newRiemannDvDx.size() == numNodeLists); + + if (compatibleEnergy){ + pairAccelerations.resize(npairs); + pairDepsDt.resize(2*npairs); + pairMassFlux.resize(npairs); + } + + // Walk all the interacting pairs. 
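+  // Pair contributions are accumulated into thread-local copies of the derivative
+  // FieldLists and reduced once per OpenMP region via threadReduceFieldLists.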
+#pragma omp parallel + { + // Thread private scratch variables + int i, j, nodeListi, nodeListj; + Scalar Wi, Wj, gWi, gWj, Pstar, rhostari, rhostarj; + Vector vstar; + + typename SpheralThreads::FieldListStack threadStack; + //auto weightedNeighborSum_thread = weightedNeighborSum.threadCopy(threadStack); + //auto massSecondMoment_thread = massSecondMoment.threadCopy(threadStack); + auto DvolDt_thread = DvolDt.threadCopy(threadStack); + auto DmDt_thread = DmDt.threadCopy(threadStack); + auto DEDt_thread = DEDt.threadCopy(threadStack); + auto DpDt_thread = DpDt.threadCopy(threadStack); + auto DvDx_thread = DvDx.threadCopy(threadStack); + auto newRiemannDpDx_thread = newRiemannDpDx.threadCopy(threadStack); + auto newRiemannDvDx_thread = newRiemannDvDx.threadCopy(threadStack); + auto XSPHDeltaV_thread = XSPHDeltaV.threadCopy(threadStack); + //auto normalization_thread = normalization.threadCopy(threadStack); + //auto HStretchTensor_thread = HStretchTensor.threadCopy(threadStack); + + // this is kind of criminal and should be fixed, but for testing purposes + // I'm going to say its allowable. We're going to zero out the thread + // copy of the Hstretch Tensor so that we can zero it out then replace + // the original with the smoothed version. + // HStretchTensor_thread.Zero(); + +#pragma omp for + for (auto kk = 0u; kk < npairs; ++kk) { + i = pairs[kk].i_node; + j = pairs[kk].j_node; + nodeListi = pairs[kk].i_list; + nodeListj = pairs[kk].j_list; + + const auto& mi = mass(nodeListi, i); + const auto& mj = mass(nodeListj, j); + + if( mi >tiny or mj > tiny){ + + // Get the state for node i. + //const auto& ui = nodalVelocity(nodeListi,i); + const auto& riemannDpDxi = riemannDpDx(nodeListi, i); + const auto& riemannDvDxi = riemannDvDx(nodeListi, i); + const auto& ri = position(nodeListi, i); + const auto& vi = velocity(nodeListi, i); + const auto& rhoi = massDensity(nodeListi, i); + const auto& voli = volume(nodeListi, i); + const auto& epsi = specificThermalEnergy(nodeListi, i); + const auto& Pi = pressure(nodeListi, i); + const auto& Hi = H(nodeListi, i); + const auto& ci = soundSpeed(nodeListi, i); + const auto Hdeti = Hi.Determinant(); + CHECK(voli > 0.0); + CHECK(rhoi > 0.0); + CHECK(Hdeti > 0.0); + + //auto& normi = normalization_thread(nodeListi,i); + auto& DvolDti = DvolDt_thread(nodeListi,i); + auto& DmDti = DmDt_thread(nodeListi, i); + auto& DEDti = DEDt_thread(nodeListi, i); + auto& DpDti = DpDt_thread(nodeListi, i); + const auto& DxDti = DxDt(nodeListi,i); + auto& newRiemannDpDxi = newRiemannDpDx_thread(nodeListi, i); + auto& newRiemannDvDxi = newRiemannDvDx_thread(nodeListi, i); + auto& DvDxi = DvDx_thread(nodeListi, i); + //auto& weightedNeighborSumi = weightedNeighborSum_thread(nodeListi, i); + //auto& massSecondMomenti = massSecondMoment(nodeListi, i); + auto& XSPHDeltaVi = XSPHDeltaV_thread(nodeListi,i); + const auto& gradRhoi = DrhoDx(nodeListi, i); + const auto& Mi = M(nodeListi,i); + + + // Get the state for node j + //const auto& uj = nodalVelocity(nodeListj,j); + const auto& riemannDpDxj = riemannDpDx(nodeListj, j); + const auto& riemannDvDxj = riemannDvDx(nodeListj, j); + const auto& rj = position(nodeListj, j); + const auto& vj = velocity(nodeListj, j); + const auto& rhoj = massDensity(nodeListj, j); + const auto& volj = volume(nodeListj, j); + const auto& epsj = specificThermalEnergy(nodeListj, j); + const auto& Pj = pressure(nodeListj, j); + const auto& Hj = H(nodeListj, j); + const auto& cj = soundSpeed(nodeListj, j); + const auto Hdetj = Hj.Determinant(); + 
CHECK(rhoj > 0.0); + CHECK(volj > 0.0); + CHECK(Hdetj > 0.0); + + //auto& normj = normalization_thread(nodeListj,j); + auto& DvolDtj = DvolDt_thread(nodeListj,j); + auto& DmDtj = DmDt_thread(nodeListj, j); + auto& DEDtj = DEDt_thread(nodeListj, j); + auto& DpDtj = DpDt_thread(nodeListj, j); + const auto& DxDtj = DxDt(nodeListj,j); + auto& newRiemannDpDxj = newRiemannDpDx_thread(nodeListj,j); + auto& newRiemannDvDxj = newRiemannDvDx_thread(nodeListj,j); + auto& DvDxj = DvDx_thread(nodeListj, j); + //auto& weightedNeighborSumj = weightedNeighborSum_thread(nodeListj, j); + //auto& massSecondMomentj = massSecondMoment(nodeListj, j); + auto& XSPHDeltaVj = XSPHDeltaV_thread(nodeListj,j); + const auto& gradRhoj = DrhoDx(nodeListj, j); + const auto& Mj = M(nodeListj,j); + + // Node displacement. + const auto rij = ri - rj; + //const auto rMagij = rij.magnitude(); + //const auto rMagij2 = rij.magnitude2(); + const auto rhatij =rij.unitVector(); + const auto vij = vi - vj; + const auto etai = Hi*rij; + const auto etaj = Hj*rij; + const auto etaMagi = etai.magnitude(); + const auto etaMagj = etaj.magnitude(); + CHECK(etaMagi >= 0.0); + CHECK(etaMagj >= 0.0); + + + // Symmetrized kernel weight and gradient. + //------------------------------------------------------ + W.kernelAndGradValue(etaMagi, Hdeti, Wi, gWi); + const auto Hetai = Hi*etai.unitVector(); + const auto gradWi = gWi*Hetai; + + W.kernelAndGradValue(etaMagj, Hdetj, Wj, gWj); + const auto Hetaj = Hj*etaj.unitVector(); + const auto gradWj = gWj*Hetaj; + + const auto gradPsii = voli * Mi.Transpose()*gradWi; + const auto gradPsij = volj * Mj.Transpose()*gradWj; + + const auto Astar = voli*gradPsii + volj*gradPsij; + + // Determine an effective pressure including a term to fight the tensile instability. + //const auto fij = epsTensile*pow(Wi/(Hdeti*WnPerh), nTensile); + const auto fij = epsTensile*FastMath::pow4(Wi/(Hdeti*WnPerh)); + const auto Ri = fij*(Pi < 0.0 ? -Pi : 0.0); + const auto Rj = fij*(Pj < 0.0 ? -Pj : 0.0); + const auto Peffi = Pi + Ri; + const auto Peffj = Pj + Rj; + + // Reimann Solver and Fluxes + //------------------------------------------------------ + // we'll clean this up when we have a gradient + // implementation we're in love with + auto gradPi = riemannDpDxi; + auto gradPj = riemannDpDxj; + auto gradVi = riemannDvDxi; + auto gradVj = riemannDvDxj; + if (gradType==GradientType::SPHSameTimeGradient or + gradType==GradientType::SPHUncorrectedGradient){ + gradPi = newRiemannDpDx(nodeListi,i); + gradPj = newRiemannDpDx(nodeListj,j); + gradVi = newRiemannDvDx(nodeListi,i); + gradVj = newRiemannDvDx(nodeListj,j); + } + // need grad rho and grad eps + riemannSolver.interfaceState(ri, rj, + Hi, Hj, + rhoi, rhoj, + ci, cj, + Peffi, Peffj, + vi, vj, + gradRhoi, gradRhoj, + gradPi, gradPj, + gradVi, gradVj, + Pstar, //output + vstar, //output + rhostari, //output + rhostarj); //output + + const auto fluxSwitch = 1.0;//(nodeListi==nodeListj ? 1.0 : 0.0); + const auto vframe = (DxDti+DxDtj)*0.5; + const auto vflux = vstar-vframe; + const auto fluxTowardsNodei = vflux.dot(rhatij) > 0; + const auto rhostar = (fluxTowardsNodei ? rhostarj : rhostari); // we'll need to fix these later + const auto epsstar = (fluxTowardsNodei ? 
epsj : epsi); // we'll need to fix these later + + const auto massFlux = fluxSwitch * rhostar * vflux.dot(Astar); + const auto momentumFlux = massFlux * vstar; + const auto energyFlux = massFlux * epsstar; + + // mass + //------------------------------------------------------ + DmDti -= massFlux; + DmDtj += massFlux; + + // momentum + //------------------------------------------------------ + const auto deltaDvDt = Pstar*Astar + momentumFlux; + DpDti -= deltaDvDt; + DpDtj += deltaDvDt; + + // energy + //------------------------------------------------------ + const auto deltaDepsDti = Pstar*Astar.dot(vi-vstar) - energyFlux; + const auto deltaDepsDtj = Pstar*Astar.dot(vstar-vj) + energyFlux; + + DEDti += deltaDepsDti; + DEDtj += deltaDepsDtj; + + if(compatibleEnergy){ + pairMassFlux[kk] = massFlux; + pairAccelerations[kk] = deltaDvDt; + pairDepsDt[2*kk] = deltaDepsDti; + pairDepsDt[2*kk+1] = deltaDepsDtj; + } + + // volume change based on nodal velocity + //----------------------------------------------------- + DvolDti -= (DxDti-DxDtj).dot(gradPsii); + DvolDtj -= (DxDti-DxDtj).dot(gradPsij); + + // gradients + //------------------------------------------------------ + const auto deltaDvDxi = 2.0*(vi-vstar).dyad(gradPsii); + const auto deltaDvDxj = 2.0*(vstar-vj).dyad(gradPsij); + + XSPHDeltaVi -= voli*gradWi; + XSPHDeltaVj += volj*gradWj; + + // based on riemann soln + DvDxi -= deltaDvDxi; + DvDxj -= deltaDvDxj; + + // while we figure out what we want ... + switch(gradType){ + case GradientType::RiemannGradient: // default grad based on riemann soln + newRiemannDvDxi -= deltaDvDxi; + newRiemannDvDxj -= deltaDvDxj; + newRiemannDpDxi -= 2.0*(Pi-Pstar)*gradPsii; + newRiemannDpDxj -= 2.0*(Pstar-Pj)*gradPsij; + break; + case GradientType::HydroAccelerationGradient: // based on hydro accel for DpDx + newRiemannDvDxi -= deltaDvDxi; + newRiemannDvDxj -= deltaDvDxj; + newRiemannDpDxi += Pstar*Astar/voli; + newRiemannDpDxj -= Pstar*Astar/volj; + break; + case GradientType::SPHGradient: // raw gradients + newRiemannDvDxi -= (vi-vj).dyad(gradPsii); + newRiemannDvDxj -= (vi-vj).dyad(gradPsij); + newRiemannDpDxi -= (Pi-Pj)*gradPsii; + newRiemannDpDxj -= (Pi-Pj)*gradPsij; + break; + case GradientType::MixedMethodGradient: // raw gradient for P riemann gradient for v + newRiemannDvDxi -= deltaDvDxi; + newRiemannDvDxj -= deltaDvDxj; + newRiemannDpDxi -= (Pi-Pj)*gradPsii; + newRiemannDpDxj -= (Pi-Pj)*gradPsij; + break; + default: + break; + } + + } //if statement + } // loop over pairs + threadReduceFieldLists(threadStack); + } // OpenMP parallel region + + + // Finish up the derivatives for each point. + for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { + const auto& nodeList = mass[nodeListi]->nodeList(); + const auto hmin = nodeList.hmin(); + const auto hmax = nodeList.hmax(); + const auto hminratio = nodeList.hminratio(); + const auto nPerh = nodeList.nodesPerSmoothingScale(); + //const auto kernelExtent = nodeList.neighbor().kernelExtent(); + const auto ni = nodeList.numInternalNodes(); + +#pragma omp parallel for + for (auto i = 0u; i < ni; ++i) { + + // Get the state for node i. 
+ const auto& ri = position(nodeListi, i); + const auto& voli = volume(nodeListi,i); + //const auto& ui = nodalVelocity(nodeListi,i); + //const auto& vi = velocity(nodeListi,i); + //const auto& ci = soundSpeed(nodeListi,i); + const auto& Hi = H(nodeListi, i); + const auto Hdeti = Hi.Determinant(); + CHECK(voli > 0.0); + CHECK(Hdeti > 0.0); + + //auto& normi = normalization(nodeListi, i); + //auto& DxDti = DxDt(nodeListi, i); + auto& DvolDti = DvolDt(nodeListi, i); + auto& DvDxi = DvDx(nodeListi, i); + auto& DHDti = DHDt(nodeListi, i); + auto& Hideali = Hideal(nodeListi, i); + auto& XSPHDeltaVi = XSPHDeltaV(nodeListi, i); + const auto& weightedNeighborSumi = weightedNeighborSum(nodeListi, i); + const auto& massSecondMomenti = massSecondMoment(nodeListi, i); + //const auto& HStretchTensori = HStretchTensor(nodeListi, i); + + XSPHDeltaVi /= Dimension::rootnu(Hdeti); + DvolDti *= voli; + + // If needed finish the total energy derivative. + //if (totalEnergy) DepsDti = mi*(vi.dot(DvDti) + DepsDti); + + // ----------------------------------------------- + // TODO: + // this makes ui equal vi from the previous timestep. We might need a special update method for this. + // We could also just take care of these in the primary loop and make the node velocity a derivative. + // ----------------------------------------------- + //if(true){ + DHDti = smoothingScale.smoothingScaleDerivative(Hi, + ri, + DvDxi, + hmin, + hmax, + hminratio, + nPerh); + Hideali = smoothingScale.newSmoothingScale(Hi, // Hi + ri, // ri + weightedNeighborSumi, // ? + massSecondMomenti, // Hstretch tensor + W, // W + hmin, // hmin + hmax, // hmax + hminratio, // hminratio + nPerh, // Ngb + connectivityMap, // connectivityMap + nodeListi, // nodeListi + i); // i + // }else{ + // // smoothing scale construction + // const auto Ngb_target = (Dimension::nDim == 3 ? 32 : + // (Dimension::nDim == 2 ? 16 : + // 4)); + // const auto stretchFactor = 0.00; + + // // set on construction + // const auto C = (Dimension::nDim == 3 ? 1.33333*3.1415 : + // (Dimension::nDim == 2 ?
3.1415 : + // 1.0)); + + // // pass + // const auto Ngb = C /(Hdeti*voli) * pow(kernelExtent,Dimension::nDim); + + // const auto Hstretch = ((1.00-stretchFactor)* SymTensor::one + + // stretchFactor * HStretchTensori)*Hi; + + // const auto scaleFactor = (1.0+0.5*(Ngb - Ngb_target)/Ngb_target); + // Hideali = std::min(std::max(scaleFactor,0.8),1.2) * Hstretch; + + // DHDti = 0.25*(Hideali-Hi)/dt; + // } + } // nodes loop + } // nodeLists loop +} // eval derivs method + +//------------------------------------------------------------------------------ +// EvalDerivs subroutine for spatial derivs +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +firstDerivativesLoop(const typename Dimension::Scalar /*time*/, + const typename Dimension::Scalar /*dt*/, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivatives) const { + + const auto tiny = std::numeric_limits::epsilon(); + //const auto epsTensile = this->epsilonTensile(); + const auto nodeMotionCoeff = this->nodeMotionCoefficient(); + + const auto calcSpatialGradients = (this->gradientType() == GradientType::SPHSameTimeGradient + or this->gradientType() == GradientType::SPHUncorrectedGradient); + const auto correctSpatialGradients = (this->gradientType() == GradientType::SPHSameTimeGradient); + + const auto nodeMotion = this->nodeMotionType(); + const auto xsphMotion = (nodeMotion == NodeMotionType::XSPH); + const auto ficianMotion = (nodeMotion == NodeMotionType::Fician); + const auto noMotion = (nodeMotion == NodeMotionType::Eulerian); + + // The connectivity. + const auto& connectivityMap = dataBase.connectivityMap(); + const auto& nodeLists = connectivityMap.nodeLists(); + const auto numNodeLists = nodeLists.size(); + const auto& pairs = connectivityMap.nodePairList(); + const auto npairs = pairs.size(); + //const auto nPerh = nodeLists[0]->nodesPerSmoothingScale(); + + // kernel + const auto& W = this->kernel(); + //const auto WnPerh = W(1.0/nPerh, 1.0); + const auto W0 = W(0.0, 1.0); + + // Get the state and derivative FieldLists. 
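+  // This first pass builds the linear-correction tensor M, the density/pressure/velocity
+  // gradients, and the node-motion velocity DxDt used by the flux loop in secondDerivativesLoop.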
+ const auto soundSpeed = state.fields(HydroFieldNames::soundSpeed, 0.0); + const auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); + const auto volume = state.fields(HydroFieldNames::volume, 0.0); + const auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); + const auto pressure = state.fields(HydroFieldNames::pressure, 0.0); + const auto position = state.fields(HydroFieldNames::position, Vector::zero); + const auto H = state.fields(HydroFieldNames::H, SymTensor::zero); + + CHECK(soundSpeed.size() == numNodeLists); + CHECK(massDensity.size() == numNodeLists); + CHECK(volume.size() == numNodeLists); + CHECK(velocity.size() == numNodeLists); + CHECK(pressure.size() == numNodeLists); + CHECK(position.size() == numNodeLists); + CHECK(H.size() == numNodeLists); + + auto M = derivatives.fields(HydroFieldNames::M_SPHCorrection, Tensor::zero); + auto DxDt = derivatives.fields(IncrementState::prefix() + HydroFieldNames::position, Vector::zero); + auto DrhoDx = derivatives.fields(GSPHFieldNames::densityGradient, Vector::zero); + auto newRiemannDpDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannPressureGradient,Vector::zero); + auto newRiemannDvDx = derivatives.fields(ReplaceState::prefix() + GSPHFieldNames::RiemannVelocityGradient,Tensor::zero); + auto massSecondMoment = derivatives.fields(HydroFieldNames::massSecondMoment, SymTensor::zero); + auto weightedNeighborSum = derivatives.fields(HydroFieldNames::weightedNeighborSum, 0.0); + //auto HStretchTensor = derivatives.fields("HStretchTensor", SymTensor::zero); + auto normalization = derivatives.fields(HydroFieldNames::normalization, 0.0); + + CHECK(M.size() == numNodeLists); + CHECK(DrhoDx.size() == numNodeLists); + CHECK(DxDt.size() == numNodeLists); + CHECK(newRiemannDpDx.size() == numNodeLists); + CHECK(newRiemannDvDx.size() == numNodeLists); + CHECK(massSecondMoment.size() == numNodeLists) + CHECK(weightedNeighborSum.size() == numNodeLists) + CHECK(normalization.size() == numNodeLists) + //CHECK(HStretchTensor.size() == numNodeLists) + +#pragma omp parallel + { + // Thread private scratch variables + int i, j, nodeListi, nodeListj; + Scalar Wi, Wj, gWi, gWj; + + typename SpheralThreads::FieldListStack threadStack; + auto M_thread = M.threadCopy(threadStack); + auto DrhoDx_thread = DrhoDx.threadCopy(threadStack); + auto newRiemannDpDx_thread = newRiemannDpDx.threadCopy(threadStack); + auto newRiemannDvDx_thread = newRiemannDvDx.threadCopy(threadStack); + auto DxDt_thread = DxDt.threadCopy(threadStack); + auto massSecondMoment_thread = massSecondMoment.threadCopy(threadStack); + auto weightedNeighborSum_thread = weightedNeighborSum.threadCopy(threadStack); + //auto HStretchTensor_thread = HStretchTensor.threadCopy(threadStack); + auto normalization_thread = normalization.threadCopy(threadStack); + +#pragma omp for + for (auto kk = 0u; kk < npairs; ++kk) { + i = pairs[kk].i_node; + j = pairs[kk].j_node; + nodeListi = pairs[kk].i_list; + nodeListj = pairs[kk].j_list; + + // Get the state for node i. 
+ const auto& vi = velocity(nodeListi, i); + const auto& Pi = pressure(nodeListi, i); + const auto& ci = soundSpeed(nodeListi, i); + const auto& rhoi = massDensity(nodeListi, i); + const auto& ri = position(nodeListi, i); + const auto& voli = volume(nodeListi, i); + const auto& Hi = H(nodeListi, i); + const auto Hdeti = Hi.Determinant(); + CHECK(voli > 0.0); + CHECK(Hdeti > 0.0); + + auto& DxDti = DxDt_thread(nodeListi,i); + //auto& HStretchTensori = HStretchTensor_thread(nodeListi,i); + auto& weightedNeighborSumi = weightedNeighborSum_thread(nodeListi,i); + auto& massSecondMomenti = massSecondMoment_thread(nodeListi, i); + auto& normi = normalization(nodeListi,i); + auto& DrhoDxi = DrhoDx_thread(nodeListi, i); + auto& newRiemannDpDxi = newRiemannDpDx_thread(nodeListi, i); + auto& newRiemannDvDxi = newRiemannDvDx_thread(nodeListi, i); + auto& Mi = M_thread(nodeListi, i); + + // Get the state for node j + const auto& vj = velocity(nodeListj, j); + const auto& Pj = pressure(nodeListj, j); + const auto& cj = soundSpeed(nodeListj, j); + const auto& rhoj = massDensity(nodeListj, j); + const auto& rj = position(nodeListj, j); + const auto& volj = volume(nodeListj, j); + const auto& Hj = H(nodeListj, j); + const auto Hdetj = Hj.Determinant(); + CHECK(volj > 0.0); + CHECK(Hdetj > 0.0); + + auto& DxDtj = DxDt_thread(nodeListj,j); + //auto& HStretchTensorj = HStretchTensor_thread(nodeListj,j); + auto& weightedNeighborSumj = weightedNeighborSum_thread(nodeListj,j); + auto& massSecondMomentj = massSecondMoment_thread(nodeListj, j); + auto& normj = normalization(nodeListj,j); + auto& DrhoDxj = DrhoDx_thread(nodeListj, j); + auto& newRiemannDpDxj = newRiemannDpDx_thread(nodeListj, j); + auto& newRiemannDvDxj = newRiemannDvDx_thread(nodeListj, j); + auto& Mj = M_thread(nodeListj, j); + + const auto rij = ri - rj; + //const auto rMagij = safeInv(rij.magnitude()); + + const auto etai = Hi*rij; + const auto etaj = Hj*rij; + const auto etaMagi = etai.magnitude(); + const auto etaMagj = etaj.magnitude(); + CHECK(etaMagi >= 0.0); + CHECK(etaMagj >= 0.0); + + W.kernelAndGradValue(etaMagi, Hdeti, Wi, gWi); + const auto Hetai = Hi*etai.unitVector(); + const auto gradWi = gWi*Hetai; + + W.kernelAndGradValue(etaMagj, Hdetj, Wj, gWj); + const auto Hetaj = Hj*etaj.unitVector(); + const auto gradWj = gWj*Hetaj; + + const auto psii = voli*Wi; + const auto psij = volj*Wj; + const auto gradPsii = voli*gradWi; + const auto gradPsij = volj*gradWj; + + weightedNeighborSumi += std::abs(gWi); + weightedNeighborSumj += std::abs(gWj); + + //HStretchTensori -= voli*rij.selfdyad()*gWi*rMagij; + //HStretchTensorj -= volj*rij.selfdyad()*gWj*rMagij; + + const auto rij2 = rij.magnitude2(); + const auto thpt = rij.selfdyad()*safeInvVar(rij2*rij2*rij2); + massSecondMomenti += gradWi.magnitude2()*thpt; + massSecondMomentj += gradWj.magnitude2()*thpt; + + // gradients + Mi -= rij.dyad(gradPsii); + Mj -= rij.dyad(gradPsij); + + DrhoDxi -= (rhoi - rhoj) * gradPsii; + DrhoDxj -= (rhoi - rhoj) * gradPsij; + + if (calcSpatialGradients){ + newRiemannDpDxi -= (Pi-Pj)*gradPsii; + newRiemannDpDxj -= (Pi-Pj)*gradPsij; + + newRiemannDvDxi -= (vi-vj).dyad(gradPsii); + newRiemannDvDxj -= (vi-vj).dyad(gradPsij); + } + + // node motion relative to fluid + //----------------------------------------------------------- + if (xsphMotion) { + const auto cij = 0.5*(ci+cj); + const auto wij = cij * safeInv(max(10*(vi-vj).magnitude(),cij)); + DxDti -= wij*psii*(vi-vj); + DxDtj -= wij*psij*(vj-vi); + } + if(ficianMotion){ + //const auto fi = 
FastMath::pow4(Wi/(Hdeti*WnPerh)); + //const auto fj = FastMath::pow4(Wj/(Hdetj*WnPerh)); + DxDti -= -rij*psii; + DxDtj += -rij*psij; + } + + normi += psii;//voli*gradWi.magnitude(); + normj += psij;//volj*gradWj.magnitude(); + + } // loop over pairs + + // Reduce the thread values to the master. + threadReduceFieldLists(threadStack); + + } // OpenMP parallel region + + // Finish up the spatial gradient calculation + for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { + const auto& nodeList = M[nodeListi]->nodeList(); + const auto ni = nodeList.numInternalNodes(); + +#pragma omp parallel for + for (auto i = 0u; i < ni; ++i) { + + const auto numNeighborsi = connectivityMap.numNeighborsForNode(nodeListi, i); + + const auto& ci = soundSpeed(nodeListi,i); + const auto& vi = velocity(nodeListi,i); + const auto& voli = volume(nodeListi,i); + const auto& Hi = H(nodeListi, i); + const auto Hdeti = Hi.Determinant(); + + auto& DxDti = DxDt(nodeListi,i); + auto& Mi = M(nodeListi, i); + auto& massSecondMomenti = massSecondMoment(nodeListi,i); + auto& weightedNeighborSumi = weightedNeighborSum(nodeListi,i); + //auto& HStretchTensori = HStretchTensor(nodeListi,i); + auto& normi = normalization(nodeListi, i); + const auto Mdeti = std::abs(Mi.Determinant()); + + normi += voli*Hdeti*W0; + weightedNeighborSumi = Dimension::rootnu(max(0.0, weightedNeighborSumi/Hdeti)); + //HStretchTensori /= Dimension::rootnu(max(HStretchTensori.Determinant(),tiny)); + massSecondMomenti /= Hdeti*Hdeti; + + const auto enoughNeighbors = numNeighborsi > Dimension::pownu(2); + const auto goodM = (Mdeti > 1e-2 and enoughNeighbors); + + Mi = ( goodM ? Mi.Inverse() : Tensor::one); + + if (correctSpatialGradients){ + auto& newRiemannDpDxi = newRiemannDpDx(nodeListi, i); + auto& newRiemannDvDxi = newRiemannDvDx(nodeListi, i); + auto& DrhoDxi = DrhoDx(nodeListi,i); + + DrhoDxi = Mi.Transpose()*DrhoDxi; + newRiemannDpDxi = Mi.Transpose()*newRiemannDpDxi; + newRiemannDvDxi = newRiemannDvDxi*Mi; + } + + if (xsphMotion) DxDti *= nodeMotionCoeff/max(tiny, normi); + if(ficianMotion) DxDti *= nodeMotionCoeff * ci * ci * Dimension::rootnu(Hdeti) * + safeInv( max(10.0*DxDti.magnitude(),ci)); + if(!noMotion) DxDti += vi; + } + + } + + for (ConstBoundaryIterator boundItr = this->boundaryBegin(); + boundItr != this->boundaryEnd(); + ++boundItr){ + (*boundItr)->applyFieldListGhostBoundary(M); + (*boundItr)->applyFieldListGhostBoundary(DxDt); + } + + if (calcSpatialGradients){ + for (ConstBoundaryIterator boundItr = this->boundaryBegin(); + boundItr != this->boundaryEnd(); + ++boundItr){ + (*boundItr)->applyFieldListGhostBoundary(DrhoDx); + (*boundItr)->applyFieldListGhostBoundary(newRiemannDpDx); + (*boundItr)->applyFieldListGhostBoundary(newRiemannDvDx); + } + } + for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr) (*boundaryItr)->finalizeGhostBoundary(); + +} + +} // spheral namespace \ No newline at end of file diff --git a/src/GSPH/MFVHydroBase.cc b/src/GSPH/MFVHydroBase.cc new file mode 100644 index 000000000..f2da58ce8 --- /dev/null +++ b/src/GSPH/MFVHydroBase.cc @@ -0,0 +1,385 @@ +//---------------------------------Spheral++----------------------------------// +// MFVHydroBase -- This is an Arbitrary Eulerian-Lagrangian extension of the +// MFV approach of Hopkins 2015. Its got several node-motion +// approaches which promote more regular particle distributions. +// +// Each of the ALE options defines the velocity of the nodes +// differently. 
The mass flux between nodes results from the difference +// between the node velocities and the fluid velocity. +// The velocities are defined as follows for the +// NodeMotionTypes: +// +// 1) Eulerian ---- static Nodes +// 2) Lagrangian -- nodal velocity = fluid velocity. (This is +// a spheralized version of MFV so there +// is some flux between nodes) +// 3) Fician ------ nodal velocity = fluid velocity + Fician +// PST correction +// 4) XSPH -------- nodal velocity = xsph velocity +// +// Hopkins P.F. (2015) "A New Class of Accurate, Mesh-Free Hydrodynamic +// Simulation Methods," MNRAS, 450(1):53-110 +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// +// TODO: +// 1 backpressure and fician particle shifting +// 2 Eulerian model will still crash on the Noh implosion due to void particles +// 3 Good implementation of Ngb update +// 4 treatment for material interfaces +//---------------------------------------------------------------------------// + +#include "FileIO/FileIO.hh" +#include "NodeList/SmoothingScaleBase.hh" +#include "Hydro/HydroFieldNames.hh" + +#include "DataBase/DataBase.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" +#include "DataBase/IncrementState.hh" +#include "DataBase/ReplaceState.hh" +#include "DataBase/PureReplaceState.hh" +#include "DataBase/ReplaceBoundedState.hh" +#include "DataBase/IncrementBoundedState.hh" + +#include "Field/FieldList.hh" +#include "Field/NodeIterators.hh" +#include "Boundary/Boundary.hh" +#include "Neighbor/ConnectivityMap.hh" + +#include "GSPH/MFVHydroBase.hh" +#include "GSPH/GSPHFieldNames.hh" +#include "GSPH/computeSumVolume.hh" +#include "GSPH/computeMFMDensity.hh" +#include "GSPH/Policies/MassFluxPolicy.hh" +#include "GSPH/Policies/ReplaceWithRatioPolicy.hh" +#include "GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.hh" +#include "GSPH/Policies/MFVIncrementVelocityPolicy.hh" +#include "GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.hh" +#include "GSPH/RiemannSolvers/RiemannSolverBase.hh" + +#ifdef _OPENMP +#include "omp.h" +#endif + +#include + +using std::string; +using std::min; +using std::max; + +namespace Spheral { + +//------------------------------------------------------------------------------ +// Constructor.
+//------------------------------------------------------------------------------ +template +MFVHydroBase:: +MFVHydroBase(const SmoothingScaleBase& smoothingScaleMethod, + DataBase& dataBase, + RiemannSolverBase& riemannSolver, + const TableKernel& W, + const Scalar epsDiffusionCoeff, + const double cfl, + const bool useVelocityMagnitudeForDt, + const bool compatibleEnergyEvolution, + const bool evolveTotalEnergy, + const bool XSPH, + const bool correctVelocityGradient, + const double nodeMotionCoefficient, + const NodeMotionType nodeMotionType, + const GradientType gradType, + const MassDensityType densityUpdate, + const HEvolutionType HUpdate, + const double epsTensile, + const double nTensile, + const Vector& xmin, + const Vector& xmax): + GenericRiemannHydro(smoothingScaleMethod, + dataBase, + riemannSolver, + W, + epsDiffusionCoeff, + cfl, + useVelocityMagnitudeForDt, + compatibleEnergyEvolution, + evolveTotalEnergy, + XSPH, + correctVelocityGradient, + gradType, + densityUpdate, + HUpdate, + epsTensile, + nTensile, + xmin, + xmax), + mNodeMotionCoefficient(nodeMotionCoefficient), + mNodeMotionType(nodeMotionType), + mNodalVelocity(FieldStorageType::CopyFields), + mDmassDt(FieldStorageType::CopyFields), + mDthermalEnergyDt(FieldStorageType::CopyFields), + mDmomentumDt(FieldStorageType::CopyFields), + mDvolumeDt(FieldStorageType::CopyFields), + //mHStretchTensor(FieldStorageType::CopyFields), + mPairMassFlux(){ + mNodalVelocity = dataBase.newFluidFieldList(Vector::zero, GSPHFieldNames::nodalVelocity); + mDmassDt = dataBase.newFluidFieldList(0.0, IncrementState::prefix() + HydroFieldNames::mass); + mDthermalEnergyDt = dataBase.newFluidFieldList(0.0, IncrementState::prefix() + GSPHFieldNames::thermalEnergy); + mDmomentumDt = dataBase.newFluidFieldList(Vector::zero, IncrementState::prefix() + GSPHFieldNames::momentum); + mDvolumeDt = dataBase.newFluidFieldList(0.0, IncrementState::prefix() + HydroFieldNames::volume); + //mHStretchTensor = dataBase.newFluidFieldList(SymTensor::zero, "HStretchTensor"); + mPairMassFlux.clear(); +} + +//------------------------------------------------------------------------------ +// Destructor +//------------------------------------------------------------------------------ +template +MFVHydroBase:: +~MFVHydroBase() { +} + +//------------------------------------------------------------------------------ +// On problem start up, we need to initialize our internal data. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +initializeProblemStartup(DataBase& dataBase) { + GenericRiemannHydro::initializeProblemStartup(dataBase); +} + +//------------------------------------------------------------------------------ +// Register the state we need/are going to evolve. 
+//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +registerState(DataBase& dataBase, + State& state) { + + GenericRiemannHydro::registerState(dataBase,state); + + dataBase.resizeFluidFieldList(mNodalVelocity, Vector::zero, GSPHFieldNames::nodalVelocity,false); + + auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); + auto position = state.fields(HydroFieldNames::position,Vector::zero); + auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); + auto volume = state.fields(HydroFieldNames::volume, 0.0); + auto mass = state.fields(HydroFieldNames::mass, 0.0); + auto specificThermalEnergy = state.fields(HydroFieldNames::specificThermalEnergy, 0.0); + + // We use the thermal energy to update the specific thermal energy + state.removePolicy(specificThermalEnergy,false); + + CHECK(position.numFields() == dataBase.numFluidNodeLists()); + CHECK(velocity.numFields() == dataBase.numFluidNodeLists()); + CHECK(volume.numFields() == dataBase.numFluidNodeLists()); + CHECK(mass.numFields() == dataBase.numFluidNodeLists()); + CHECK(specificThermalEnergy.numFields() == dataBase.numFluidNodeLists()); + + auto nodeListi = 0u; + for (auto itr = dataBase.fluidNodeListBegin(); + itr < dataBase.fluidNodeListEnd(); + ++itr, ++nodeListi) { + auto& massi = (*itr)->mass(); + auto minVolume = massi.min()/(*itr)->rhoMax(); + auto maxVolume = massi.max()/(*itr)->rhoMin(); + state.enroll(*volume[nodeListi], make_policy>(minVolume, + maxVolume)); + } + + + state.enroll(massDensity, make_policy>({HydroFieldNames::mass, + HydroFieldNames::volume}, + HydroFieldNames::mass, + HydroFieldNames::volume)); + + state.enroll(mass, make_policy>({HydroFieldNames::velocity, + HydroFieldNames::specificThermalEnergy})); + + state.enroll(velocity, + make_policy>({HydroFieldNames::specificThermalEnergy})); + + + if (this->compatibleEnergyEvolution()) { + auto thermalEnergyPolicy = make_policy>(dataBase); + state.enroll(specificThermalEnergy, thermalEnergyPolicy); + }else if (this->evolveTotalEnergy()) { + std::cout <<"evolve total energy not implemented for MFV" << std::endl; + } else { + auto thermalEnergyPolicy = make_policy>(); + state.enroll(specificThermalEnergy,thermalEnergyPolicy); + } + +} + +//------------------------------------------------------------------------------ +// Register the state derivative fields. 
+//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +registerDerivatives(DataBase& dataBase, + StateDerivatives& derivs) { + GenericRiemannHydro::registerDerivatives(dataBase,derivs); + dataBase.resizeFluidFieldList(mDmassDt, 0.0, IncrementState::prefix() + HydroFieldNames::mass, false); + dataBase.resizeFluidFieldList(mDthermalEnergyDt, 0.0, IncrementState::prefix() + GSPHFieldNames::thermalEnergy, false); + dataBase.resizeFluidFieldList(mDmomentumDt, Vector::zero, IncrementState::prefix() + GSPHFieldNames::momentum, false); + dataBase.resizeFluidFieldList(mDvolumeDt, 0.0, IncrementState::prefix() + HydroFieldNames::volume, false); + //dataBase.resizeFluidFieldList(mHStretchTensor,SymTensor::zero, "HStretchTensor", false); + derivs.enroll(mDmassDt); + derivs.enroll(mDthermalEnergyDt); + derivs.enroll(mDmomentumDt); + derivs.enroll(mDvolumeDt); + //derivs.enroll(mHStretchTensor); + derivs.enrollAny(GSPHFieldNames::pairMassFlux, mPairMassFlux); +} + +//------------------------------------------------------------------------------ +// This method is called once at the beginning of a timestep, after all state registration. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +preStepInitialize(const DataBase& dataBase, + State& state, + StateDerivatives& derivs) { + GenericRiemannHydro::preStepInitialize(dataBase,state,derivs); + + if(this->densityUpdate() == MassDensityType::RigorousSumDensity){ + + const auto position = state.fields(HydroFieldNames::position, Vector::zero); + const auto H = state.fields(HydroFieldNames::H, SymTensor::zero); + const auto mass = state.fields(HydroFieldNames::mass, 0.0); + auto massDensity = state.fields(HydroFieldNames::massDensity, 0.0); + auto volume = state.fields(HydroFieldNames::volume, 0.0); + + computeSumVolume(dataBase.connectivityMap(),this->kernel(),position,H,volume); + computeMFMDensity(mass,volume,massDensity); + + for (auto boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr){ + (*boundaryItr)->applyFieldListGhostBoundary(volume); + (*boundaryItr)->applyFieldListGhostBoundary(massDensity); + } + for (auto boundaryItr = this->boundaryBegin(); + boundaryItr < this->boundaryEnd(); + ++boundaryItr) (*boundaryItr)->finalizeGhostBoundary(); + } +} + +//------------------------------------------------------------------------------ +// Initialize the hydro before calling evaluateDerivatives +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +initialize(const typename Dimension::Scalar time, + const typename Dimension::Scalar dt, + const DataBase& dataBase, + State& state, + StateDerivatives& derivs) { + GenericRiemannHydro::initialize(time,dt,dataBase,state,derivs); +} + +//------------------------------------------------------------------------------ +// Finalize the derivatives. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +finalizeDerivatives(const typename Dimension::Scalar time, + const typename Dimension::Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivs) const { + // hackish solution and I should be ashamed. 
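+  // The compatible energy policy reads DmDt and DpDt on ghost nodes, so exchange those
+  // derivative FieldLists across the boundaries before the update policies fire.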
+ if (this->compatibleEnergyEvolution()) { + auto DpDt = derivs.fields(IncrementState::prefix() + GSPHFieldNames::momentum, Vector::zero); + auto DmDt = derivs.fields(IncrementState::prefix() + HydroFieldNames::mass, 0.0); + for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr){ + (*boundaryItr)->applyFieldListGhostBoundary(DpDt); + (*boundaryItr)->applyFieldListGhostBoundary(DmDt); + } + + for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr) (*boundaryItr)->finalizeGhostBoundary(); + } +} + +//------------------------------------------------------------------------------ +// Apply the ghost boundary conditions for hydro state fields. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +applyGhostBoundaries(State& state, + StateDerivatives& derivs) { + GenericRiemannHydro::applyGhostBoundaries(state,derivs); + + auto nodalVelocity = state.fields(GSPHFieldNames::nodalVelocity, Vector::zero); + + for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr) { + (*boundaryItr)->applyFieldListGhostBoundary(nodalVelocity); + } +} + +//------------------------------------------------------------------------------ +// Enforce the boundary conditions for hydro state fields. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +enforceBoundaries(State& state, + StateDerivatives& derivs) { + GenericRiemannHydro::enforceBoundaries(state,derivs); + + auto nodalVelocity = state.fields(GSPHFieldNames::nodalVelocity, Vector::zero); + + for (ConstBoundaryIterator boundaryItr = this->boundaryBegin(); + boundaryItr != this->boundaryEnd(); + ++boundaryItr) { + (*boundaryItr)->enforceFieldListBoundary(nodalVelocity); + } +} + + +//------------------------------------------------------------------------------ +// Dump the current state to the given file. +//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +dumpState(FileIO& file, const string& pathName) const { + GenericRiemannHydro::dumpState(file,pathName); + file.write(mNodalVelocity, pathName + "/nodalVelocity"); + file.write(mDmassDt, pathName + "/DmassDt"); + file.write(mDthermalEnergyDt, pathName + "/DthermalEnergyDt"); + file.write(mDmomentumDt, pathName + "/DmomentumDt"); + file.write(mDvolumeDt, pathName + "/DvolumeDt"); +} + +//------------------------------------------------------------------------------ +// Restore the state from the given file. 
+//------------------------------------------------------------------------------ +template +void +MFVHydroBase:: +restoreState(const FileIO& file, const string& pathName) { + GenericRiemannHydro::restoreState(file,pathName); + file.read(mNodalVelocity, pathName + "/nodalVelocity"); + file.read(mDmassDt, pathName + "/DmassDt"); + file.read(mDthermalEnergyDt, pathName + "/DthermalEnergyDt"); + file.read(mDmomentumDt, pathName + "/DmomentumDt"); + file.read(mDvolumeDt, pathName + "/DvolumeDt"); +} + +} + diff --git a/src/GSPH/MFVHydroBase.hh b/src/GSPH/MFVHydroBase.hh new file mode 100644 index 000000000..593fde4bf --- /dev/null +++ b/src/GSPH/MFVHydroBase.hh @@ -0,0 +1,219 @@ +//---------------------------------Spheral++----------------------------------// +// MFVHydroBase -- This is an Arbitrary Eulerian-Lagrangian extension of the +// MFV approach of Hopkins 2015. It has several node-motion +// approaches which promote more regular particle distributions. +// +// Each of the ALE options defines the velocity of the nodes +// differently. The mass flux between nodes results from the difference +// between the node velocities and the fluid velocity. +// The velocities are defined as follows for the +// NodeMotionTypes: +// +// 1) Eulerian ---- static Nodes +// 2) Lagrangian -- nodal velocity = fluid velocity. (This is +// a spheralized version of MFV so there +// is some flux between nodes) +// 3) Fician ------ nodal velocity = fluid velocity + Fician +// PST correction +// 4) XSPH -------- nodal velocity = xsph velocity +// +// Hopkins P.F. (2015) "A New Class of Accurate, Mesh-Free Hydrodynamic +// Simulation Methods," MNRAS, 450(1):53-110 +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// +// TODO: +// 1 backpressure and fician particle shifting +// 2 Eulerian model will still crash on the Noh implosion due to void particles +// 3 Good implementation of Ngb update +// 4 treatment for material interfaces +//---------------------------------------------------------------------------// + +#ifndef __Spheral_MFVHydroBase_hh__ +#define __Spheral_MFVHydroBase_hh__ + +#include + +#include "GSPH/GenericRiemannHydro.hh" + +namespace Spheral { + +enum class NodeMotionType { + Lagrangian = 0, + Eulerian = 1, + Fician = 2, + XSPH = 3, + BackgroundPressure = 4, +}; + +template class State; +template class StateDerivatives; +template class SmoothingScaleBase; +template class TableKernel; +template class RiemannSolverBase; +template class DataBase; +template class Field; +template class FieldList; +class FileIO; + +template +class MFVHydroBase: public GenericRiemannHydro { + +public: + //--------------------------- Public Interface ---------------------------// + typedef typename Dimension::Scalar Scalar; + typedef typename Dimension::Vector Vector; + typedef typename Dimension::Tensor Tensor; + typedef typename Dimension::SymTensor SymTensor; + typedef typename Dimension::ThirdRankTensor ThirdRankTensor; + + typedef typename GenericRiemannHydro::TimeStepType TimeStepType; + typedef typename GenericRiemannHydro::ConstBoundaryIterator ConstBoundaryIterator; + + // Constructors.
+ MFVHydroBase(const SmoothingScaleBase& smoothingScaleMethod, + DataBase& dataBase, + RiemannSolverBase& riemannSolver, + const TableKernel& W, + const Scalar epsDiffusionCoeff, + const double cfl, + const bool useVelocityMagnitudeForDt, + const bool compatibleEnergyEvolution, + const bool evolveTotalEnergy, + const bool XSPH, + const bool correctVelocityGradient, + const double nodeMotionCoefficient, + const NodeMotionType nodeMotionType, + const GradientType gradType, + const MassDensityType densityUpdate, + const HEvolutionType HUpdate, + const double epsTensile, + const double nTensile, + const Vector& xmin, + const Vector& xmax); + + // Destructor. + virtual ~MFVHydroBase(); + + // Tasks we do once on problem startup. + virtual + void initializeProblemStartup(DataBase& dataBase) override; + + // Register the state Hydro expects to use and evolve. + virtual + void registerState(DataBase& dataBase, + State& state) override; + + // Register the derivatives/change fields for updating state. + virtual + void registerDerivatives(DataBase& dataBase, + StateDerivatives& derivs) override; + + + // This method is called once at the beginning of a timestep, after all state registration. + virtual void preStepInitialize(const DataBase& dataBase, + State& state, + StateDerivatives& derivs) override; + + // Initialize the Hydro before we start a derivative evaluation. + virtual + void initialize(const Scalar time, + const Scalar dt, + const DataBase& dataBase, + State& state, + StateDerivatives& derivs) override; + + // Evaluate the derivatives for the principle hydro variables: + // mass density, velocity, and specific thermal energy. + virtual + void evaluateDerivatives(const Scalar time, + const Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivatives) const override; + void + firstDerivativesLoop(const typename Dimension::Scalar time, + const typename Dimension::Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivatives) const; + + void + secondDerivativesLoop(const typename Dimension::Scalar time, + const typename Dimension::Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivatives) const; + + // Finalize the derivatives. + virtual + void finalizeDerivatives(const Scalar time, + const Scalar dt, + const DataBase& dataBase, + const State& state, + StateDerivatives& derivs) const override; + + // Apply boundary conditions to the physics specific fields. + virtual + void applyGhostBoundaries(State& state, + StateDerivatives& derivs) override; + + // Enforce boundary conditions for the physics specific fields. + virtual + void enforceBoundaries(State& state, + StateDerivatives& derivs) override; + + Scalar nodeMotionCoefficient() const; + void nodeMotionCoefficient(const Scalar x); + + NodeMotionType nodeMotionType() const; + void nodeMotionType(NodeMotionType x); + + const FieldList& nodalVelocity() const; + const FieldList& DmassDt() const; + const FieldList& DthermalEnergyDt() const; + const FieldList& DmomentumDt() const; + const FieldList& DvolumeDt() const; + //const FieldList& HStretchTensor() const; + + const std::vector& pairMassFlux() const; + + //**************************************************************************** + // Methods required for restarting. 
+ virtual std::string label() const override { return "MFVHydroBase" ; } + virtual void dumpState(FileIO& file, const std::string& pathName) const override; + virtual void restoreState(const FileIO& file, const std::string& pathName) override; + //**************************************************************************** +private: + + Scalar mNodeMotionCoefficient; + + NodeMotionType mNodeMotionType; + + FieldList mNodalVelocity; + FieldList mDmassDt; + FieldList mDthermalEnergyDt; + FieldList mDmomentumDt; + FieldList mDvolumeDt; + //FieldList mHStretchTensor; + + std::vector mPairMassFlux; + + // No default constructor, copying, or assignment. + MFVHydroBase(); + MFVHydroBase(const MFVHydroBase&); + MFVHydroBase& operator=(const MFVHydroBase&); +}; + +} + +#include "MFVHydroBaseInline.hh" + +#else + +// Forward declaration. +namespace Spheral { + template class MFVHydroBase; +} + +#endif diff --git a/src/GSPH/MFVHydroBaseInline.hh b/src/GSPH/MFVHydroBaseInline.hh new file mode 100644 index 000000000..7e945fa09 --- /dev/null +++ b/src/GSPH/MFVHydroBaseInline.hh @@ -0,0 +1,94 @@ +namespace Spheral { + + +//------------------------------------------------------------------------------ +// set/get for mesh motion coefficient +//------------------------------------------------------------------------------ +template +inline +typename Dimension::Scalar +MFVHydroBase::nodeMotionCoefficient() const { + return mNodeMotionCoefficient; +} + +template +inline +void +MFVHydroBase:: +nodeMotionCoefficient(typename Dimension::Scalar x) { + mNodeMotionCoefficient = x; +} + +//------------------------------------------------------------------------------ +// set/get mesh motion type +//------------------------------------------------------------------------------ +template +inline +NodeMotionType +MFVHydroBase:: +nodeMotionType() const { + return mNodeMotionType; +} + +template +inline +void +MFVHydroBase:: +nodeMotionType(NodeMotionType x) { + mNodeMotionType=x; +} + +//------------------------------------------------------------------------------ +// The internal state field lists. +//------------------------------------------------------------------------------ +template +inline +const FieldList& +MFVHydroBase:: +nodalVelocity() const { + return mNodalVelocity; +} + +template +inline +const FieldList& +MFVHydroBase:: +DmassDt() const { + return mDmassDt; +} +template +inline +const FieldList& +MFVHydroBase:: +DthermalEnergyDt() const { + return mDthermalEnergyDt; +} +template +inline +const FieldList& +MFVHydroBase:: +DmomentumDt() const { + return mDmomentumDt; +} +template +inline +const FieldList& +MFVHydroBase:: +DvolumeDt() const { + return mDvolumeDt; +} +template +inline +const std::vector& +MFVHydroBase:: +pairMassFlux() const { + return mPairMassFlux; +} +// template +// inline +// const FieldList& +// MFVHydroBase:: +// HStretchTensor() const { +// return mHStretchTensor; +// } +} \ No newline at end of file diff --git a/src/GSPH/MFVHydroBaseInst.cc.py b/src/GSPH/MFVHydroBaseInst.cc.py new file mode 100644 index 000000000..4f1510ced --- /dev/null +++ b/src/GSPH/MFVHydroBaseInst.cc.py @@ -0,0 +1,12 @@ +text = """ +//------------------------------------------------------------------------------ +// Explict instantiation. 
+//------------------------------------------------------------------------------ +#include "GSPH/MFVHydroBase.cc" +#include "GSPH/MFVEvaluateDerivatives.cc" +#include "Geometry/Dimension.hh" + +namespace Spheral { + template class MFVHydroBase< Dim< %(ndim)s > >; +} +""" diff --git a/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.cc b/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.cc new file mode 100644 index 000000000..eb7a33a27 --- /dev/null +++ b/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.cc @@ -0,0 +1,226 @@ +//---------------------------------Spheral++----------------------------------// +// CompatibleMFVSpecificThermalEnergyPolicy -- This is a generalization of the +// Lagrangian compatible energy scheme to ALE-based scheme with mass flux +// between nodes. +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// + +#include "GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.hh" +#include "GSPH/GSPHFieldNames.hh" + +#include "Hydro/HydroFieldNames.hh" + +#include "NodeList/NodeList.hh" +#include "NodeList/FluidNodeList.hh" + +#include "DataBase/IncrementState.hh" +#include "DataBase/DataBase.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" + +#include "Neighbor/ConnectivityMap.hh" + +#include "Field/Field.hh" +#include "Field/FieldList.hh" + +#include "Utilities/DBC.hh" +#include "Utilities/safeInv.hh" +#include "Utilities/SpheralFunctions.hh" + +#include +#include +using std::vector; +using std::numeric_limits; +using std::abs; +using std::min; +using std::max; + +namespace Spheral { + +//------------------------------------------------------------------------------ +// Constructor. +//------------------------------------------------------------------------------ +template +CompatibleMFVSpecificThermalEnergyPolicy:: +CompatibleMFVSpecificThermalEnergyPolicy(const DataBase& dataBase): + UpdatePolicyBase(), + mDataBasePtr(&dataBase){ +} + +//------------------------------------------------------------------------------ +// Destructor. +//------------------------------------------------------------------------------ +template +CompatibleMFVSpecificThermalEnergyPolicy:: +~CompatibleMFVSpecificThermalEnergyPolicy() { +} + +//------------------------------------------------------------------------------ +// Update the field. +//------------------------------------------------------------------------------ +template +void +CompatibleMFVSpecificThermalEnergyPolicy:: +update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double /*t*/, + const double /*dt*/) { + + KeyType fieldKey, nodeListKey; + StateBase::splitFieldKey(key, fieldKey, nodeListKey); + REQUIRE(fieldKey == HydroFieldNames::specificThermalEnergy and + nodeListKey == UpdatePolicyBase::wildcard()); + auto eps = state.fields(fieldKey, Scalar()); + const auto numFields = eps.numFields(); + + // constant we'll need for the weighting scheme + const auto tiny = numeric_limits::epsilon(); + + // Get the state fields. 
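+  // Masses, velocities, and their rates, plus the pair-wise accelerations, work terms, and
+  // mass fluxes recorded in evaluateDerivatives, are needed to repartition the discrete work.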
+ const auto mass = state.fields(HydroFieldNames::mass, Scalar()); + const auto velocity = state.fields(HydroFieldNames::velocity, Vector::zero); + const auto DmassDt = derivs.fields(IncrementState::prefix() + HydroFieldNames::mass, 0.0); + const auto DmomentumDt = derivs.fields(IncrementState::prefix() + GSPHFieldNames::momentum, Vector::zero); + const auto& pairAccelerations = derivs.getAny(HydroFieldNames::pairAccelerations, vector()); + const auto& pairDepsDt = derivs.getAny(HydroFieldNames::pairWork, vector()); + const auto& pairMassFlux = derivs.getAny(GSPHFieldNames::pairMassFlux, vector()); + + const auto& connectivityMap = mDataBasePtr->connectivityMap(); + const auto& pairs = connectivityMap.nodePairList(); + const auto npairs = pairs.size(); + + CHECK(pairAccelerations.size() == npairs); + CHECK(pairMassFlux.size() == npairs); + CHECK(pairDepsDt.size() == 2*npairs); + + auto DepsDt = derivs.fields(IncrementState::prefix() + GSPHFieldNames::thermalEnergy, 0.0); + DepsDt.Zero(); + + const auto hdt = 0.5*multiplier; + + // Walk all pairs and figure out the discrete work for each point +#pragma omp parallel + { + // Thread private variables + auto DepsDt_thread = DepsDt.threadCopy(); + +#pragma omp for + for (auto kk = 0u; kk < npairs; ++kk) { + const auto i = pairs[kk].i_node; + const auto j = pairs[kk].j_node; + const auto nodeListi = pairs[kk].i_list; + const auto nodeListj = pairs[kk].j_list; + + const auto& paccij = pairAccelerations[kk]; + const auto& DepsDt0i = pairDepsDt[2*kk]; + const auto& DepsDt0j = pairDepsDt[2*kk+1]; + const auto& massFlux = pairMassFlux[kk]; + + const auto mi = mass(nodeListi, i); + const auto pi = mi*velocity(nodeListi, i); + const auto& DPDti = DmomentumDt(nodeListi, i); + const auto& DmDti = DmassDt(nodeListi,i); + + const auto mj = mass(nodeListj, j); + const auto pj = mj*velocity(nodeListj, j); + const auto& DPDtj = DmomentumDt(nodeListj, j); + const auto& DmDtj = DmassDt(nodeListj,j); + + // half-step momenta + const auto pi12 = pi + DPDti*hdt; + const auto pj12 = pj + DPDtj*hdt; + //const auto pij = pi12 - pj12; + + // weighting scheme + const auto weighti = abs(DepsDt0i) + tiny; + const auto weightj = abs(DepsDt0j) + tiny; + const auto wi = weighti/(weighti+weightj); + + // safeInv + const auto invmi0 = safeInv(mi); + const auto invmj0 = safeInv(mj); + const auto invmi1 = safeInv(mi+DmDti*multiplier); + const auto invmj1 = safeInv(mj+DmDtj*multiplier); + + const Scalar delta_duij = (pi12*invmi1 - pj12*invmj1).dot(paccij) + + (pj.dot(pj)*invmj0*invmj1 - pi.dot(pi)*invmi0*invmi1) * massFlux*0.5 + - DepsDt0i-DepsDt0j; + + CHECK(wi >= 0.0 and wi <= 1.0); + CHECK(invmi0 >= 0.0); + CHECK(invmj0 >= 1.0); + + const auto depsi = (wi *delta_duij+DepsDt0i); + const auto depsj = ((1.0-wi)*delta_duij+DepsDt0j); + + // make conservative + DepsDt_thread(nodeListi, i) += depsi; + DepsDt_thread(nodeListj, j) += depsj; + + } + +#pragma omp critical + { + DepsDt_thread.threadReduce(); + } + } + +// // Now we can update the energy. 
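// (Reviewer sketch -- not part of this patch.) The per-node finalize below is
// algebraically equivalent to advancing the *total* thermal energy and then
// dividing by the end-of-step mass: with m1 = m + DmDt*dt,
//   eps += (DepsDt - DmDt*eps)*dt/m1   <=>   m1*eps^{new} = m*eps^{old} + DepsDt*dt,
// where DepsDt is the pairwise-accumulated d(m*eps)/dt. For DmDt = 0 this
// reduces to the usual Lagrangian compatible-energy update.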
+ for (auto nodeListi = 0u; nodeListi < numFields; ++nodeListi) { + const auto n = eps[nodeListi]->numInternalElements(); +#pragma omp parallel for + for (auto i = 0u; i < n; ++i) { + const auto m1 = mass(nodeListi,i)+DmassDt(nodeListi,i)*multiplier; + if (m1 > tiny) eps(nodeListi, i) += (DepsDt(nodeListi, i) - DmassDt(nodeListi, i)*eps(nodeListi, i)) * multiplier * safeInv(m1); + } + } + +} + +//------------------------------------------------------------------------------ +// Update the field using increments +//------------------------------------------------------------------------------ +template +void +CompatibleMFVSpecificThermalEnergyPolicy:: +updateAsIncrement(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) { + + KeyType fieldKey, nodeListKey; + StateBase::splitFieldKey(key, fieldKey, nodeListKey); + REQUIRE(fieldKey == HydroFieldNames::specificThermalEnergy and + nodeListKey == UpdatePolicyBase::wildcard()); + auto eps = state.fields(fieldKey, Scalar()); + + // Build an increment policy to use. + IncrementState fpolicy; + + // Do the deed for each of our Fields. + for (auto fptr: eps) { + fpolicy.updateAsIncrement(State::key(*fptr), + state, derivs, multiplier, t, dt); + } +} + +//------------------------------------------------------------------------------ +// Equivalence operator. +//------------------------------------------------------------------------------ +template +bool +CompatibleMFVSpecificThermalEnergyPolicy:: +operator==(const UpdatePolicyBase& rhs) const { + + // We're only equal if the other guy is also an increment operator. + const auto* rhsPtr = dynamic_cast*>(&rhs); + return rhsPtr != nullptr; +} + +} + diff --git a/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.hh b/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.hh new file mode 100644 index 000000000..2165468cd --- /dev/null +++ b/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.hh @@ -0,0 +1,76 @@ +//---------------------------------Spheral++----------------------------------// +// CompatibleMFVSpecificThermalEnergyPolicy -- This is a generalization of the +// Lagrangian compatible energy scheme to ALE-based scheme with mass flux +// between nodes. +// +// J.M. Pearl 2024 +//----------------------------------------------------------------------------// + +#ifndef __Spheral_CompatibleMFVSpecificThermalEnergyPolicy_hh__ +#define __Spheral_CompatibleMFVSpecificThermalEnergyPolicy_hh__ + +#include "DataBase/UpdatePolicyBase.hh" + +#include + +namespace Spheral { + +// Forward declarations. +template class State; +template class StateDerivatives; +template class FluidNodeList; +template class FieldList; +template class DataBase; + +template +class CompatibleMFVSpecificThermalEnergyPolicy: + public UpdatePolicyBase { +public: + //--------------------------- Public Interface ---------------------------// + // Useful typedefs + using Scalar = typename Dimension::Scalar; + using Vector = typename Dimension::Vector; + using KeyType = typename UpdatePolicyBase::KeyType; + + // Constructors, destructor. + CompatibleMFVSpecificThermalEnergyPolicy(const DataBase& db); + virtual ~CompatibleMFVSpecificThermalEnergyPolicy(); + + // Overload the methods describing how to update Fields. 
+ virtual void update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // If the derivative stored values for the pair-accelerations has not been updated, + // we need to just time advance normally. + virtual void updateAsIncrement(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // Equivalence. + virtual bool operator==(const UpdatePolicyBase& rhs) const override; + +private: + //--------------------------- Private Interface ---------------------------// + const DataBase* mDataBasePtr; + + CompatibleMFVSpecificThermalEnergyPolicy(const CompatibleMFVSpecificThermalEnergyPolicy& rhs); + CompatibleMFVSpecificThermalEnergyPolicy& operator=(const CompatibleMFVSpecificThermalEnergyPolicy& rhs); +}; + +} + +#else + +// Forward declaration. +namespace Spheral { + template class CompatibleMFVSpecificThermalEnergyPolicy; +} + +#endif diff --git a/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicyInst.cc.py b/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicyInst.cc.py new file mode 100644 index 000000000..2559684fc --- /dev/null +++ b/src/GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicyInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "Geometry/Dimension.hh" +#include "GSPH/Policies/CompatibleMFVSpecificThermalEnergyPolicy.cc" + +namespace Spheral { + template class CompatibleMFVSpecificThermalEnergyPolicy >; +} +""" diff --git a/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.cc b/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.cc new file mode 100644 index 000000000..1d240cede --- /dev/null +++ b/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.cc @@ -0,0 +1,94 @@ +//---------------------------------Spheral++----------------------------------// +// MFVIncrementSpecificThermalEnergyPolicy -- This is a specialized increment +// policy for the specific thermal energy for schemes that allow +// for flux between nodes. The specific thermal energy is updated +// based on the time derivative of thermal energy. The mass and +// time derivative are needed to got from thermal to specific +// thermal. +// +// J.M. Pearl 2022 +//----------------------------------------------------------------------------// +// TODO: the edge case handing for m->0 needs to be improved to robustly +// handle void when full Eulerian. +//----------------------------------------------------------------------------// + +#include "GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.hh" +#include "GSPH/GSPHFieldNames.hh" +#include "DataBase/IncrementState.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" +#include "Field/Field.hh" +#include "Utilities/DBC.hh" +#include "Hydro/HydroFieldNames.hh" + +#include + +namespace Spheral { +//------------------------------------------------------------------------------ +// Constructors. +//------------------------------------------------------------------------------ +template +MFVIncrementSpecificThermalEnergyPolicy:: +MFVIncrementSpecificThermalEnergyPolicy(std::initializer_list depends): + FieldUpdatePolicy(depends){ +} + +//------------------------------------------------------------------------------ +// Destructor. 
+//------------------------------------------------------------------------------ +template +MFVIncrementSpecificThermalEnergyPolicy:: +~MFVIncrementSpecificThermalEnergyPolicy() { +} + +//------------------------------------------------------------------------------ +// Update the field. +//------------------------------------------------------------------------------ +template +void +MFVIncrementSpecificThermalEnergyPolicy:: +update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) { + + const auto tiny = std::numeric_limits::epsilon(); + + KeyType fieldKey, nodeListKey; + StateBase::splitFieldKey(key, fieldKey, nodeListKey); + + const auto massKey = StateBase::buildFieldKey(HydroFieldNames::mass, nodeListKey); + const auto derivFieldKey = StateBase::buildFieldKey(prefix() + GSPHFieldNames::thermalEnergy, nodeListKey); + + const auto& m = state.field(massKey, Scalar()); + auto& eps = state.field(key, Scalar()); + + const auto& DmDt = derivs.field(prefix() + massKey, Scalar()); + const auto& DmepsDt = derivs.field(derivFieldKey, Scalar()); + + const auto n = m.numInternalElements(); +#pragma omp parallel for + for (auto i = 0u; i < n; ++i) { + const auto m1 = m(i)+DmDt(i)*multiplier; + if (m1 > tiny) eps(i) += (DmepsDt(i) - DmDt(i)*eps(i)) * multiplier * safeInv(m1); + } + +} + +//------------------------------------------------------------------------------ +// Equivalence operator. +//------------------------------------------------------------------------------ +template +bool +MFVIncrementSpecificThermalEnergyPolicy:: +operator==(const UpdatePolicyBase& rhs) const { + + // We're only equal if the other guy is also an replace operator. + const auto* rhsPtr = dynamic_cast*>(&rhs); + return rhsPtr != nullptr; +} + +} + diff --git a/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.hh b/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.hh new file mode 100644 index 000000000..703237b7b --- /dev/null +++ b/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.hh @@ -0,0 +1,70 @@ +//---------------------------------Spheral++----------------------------------// +// MFVIncrementSpecificThermalEnergyPolicy -- This is a specialized increment +// policy for the specific thermal energy for schemes that allow +// for flux between nodes. The specific thermal energy is updated +// based on the time derivative of thermal energy. The mass and +// time derivative are needed to got from thermal to specific +// thermal. +// +// J.M. Pearl 2022 +//----------------------------------------------------------------------------// +// TODO: the edge case handing for m->0 needs to be improved to robustly +// handle void when full Eulerian. +//----------------------------------------------------------------------------// + +#ifndef __Spheral_MFVIncrementSpecificThermalEnergyPolicy_hh__ +#define __Spheral_MFVIncrementSpecificThermalEnergyPolicy_hh__ + +#include "DataBase/FieldUpdatePolicy.hh" + +#include + +namespace Spheral { + +template +class MFVIncrementSpecificThermalEnergyPolicy: public FieldUpdatePolicy { +public: + + //--------------------------- Public Interface ---------------------------// + // Useful typedefs + using Scalar = typename Dimension::Scalar; + using Vector = typename Dimension::Vector; + using KeyType = typename FieldUpdatePolicy::KeyType; + + // Constructors, destructor. 
+ MFVIncrementSpecificThermalEnergyPolicy(std::initializer_list depends={}); + ~MFVIncrementSpecificThermalEnergyPolicy(); + + // Overload the methods describing how to update FieldLists. + virtual void update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // Equivalence. + virtual bool operator==(const UpdatePolicyBase& rhs) const override; + + static const std::string prefix() { return "delta "; } + +private: + + const std::string mStateKey; + const std::string mDerivativeKey; + + //--------------------------- Private Interface ---------------------------// + MFVIncrementSpecificThermalEnergyPolicy(const MFVIncrementSpecificThermalEnergyPolicy& rhs); + MFVIncrementSpecificThermalEnergyPolicy& operator=(const MFVIncrementSpecificThermalEnergyPolicy& rhs); +}; + +} + +#else + +// Forward declaration. +namespace Spheral { + template class MFVIncrementSpecificThermalEnergyPolicy; +} + +#endif diff --git a/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicyInst.cc.py b/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicyInst.cc.py new file mode 100644 index 000000000..975b8a38b --- /dev/null +++ b/src/GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicyInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "Geometry/Dimension.hh" +#include "GSPH/Policies/MFVIncrementSpecificThermalEnergyPolicy.cc" + +namespace Spheral { + template class MFVIncrementSpecificThermalEnergyPolicy>; +} +""" diff --git a/src/GSPH/Policies/MFVIncrementVelocityPolicy.cc b/src/GSPH/Policies/MFVIncrementVelocityPolicy.cc new file mode 100644 index 000000000..1083c332e --- /dev/null +++ b/src/GSPH/Policies/MFVIncrementVelocityPolicy.cc @@ -0,0 +1,94 @@ +//---------------------------------Spheral++----------------------------------// +// MFVIncrementVelocityPolicy -- specialized policy for hydros that allow for mass +// flux between nodes. The momentum time derivative +// is used to update the velocity. The "hydro acceleration" +// is also added in to be compatible w/ phys packages +// that apply a pure acceleration. +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// +// TODO : HydroAcceleration needs to be added in +//----------------------------------------------------------------------------// + +#include "GSPH/Policies/MFVIncrementVelocityPolicy.hh" +#include "GSPH/GSPHFieldNames.hh" +#include "DataBase/IncrementState.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" +#include "Field/Field.hh" +#include "Utilities/DBC.hh" +#include "Hydro/HydroFieldNames.hh" + +#include + +namespace Spheral { +//------------------------------------------------------------------------------ +// Constructors. +//------------------------------------------------------------------------------ +template +MFVIncrementVelocityPolicy:: +MFVIncrementVelocityPolicy(std::initializer_list depends): + FieldUpdatePolicy(depends){ +} + +//------------------------------------------------------------------------------ +// Destructor. 
+//------------------------------------------------------------------------------ +template +MFVIncrementVelocityPolicy:: +~MFVIncrementVelocityPolicy() { +} + +//------------------------------------------------------------------------------ +// Update the field. +//------------------------------------------------------------------------------ +template +void +MFVIncrementVelocityPolicy:: +update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) { + + const auto tiny = std::numeric_limits::epsilon(); + + KeyType fieldKey, nodeListKey; + StateBase::splitFieldKey(key, fieldKey, nodeListKey); + + const auto massKey = StateBase::buildFieldKey(HydroFieldNames::mass, nodeListKey); + const auto momDerivFieldKey = StateBase::buildFieldKey(prefix() + GSPHFieldNames::momentum, nodeListKey); + //const auto accDerivFieldKey = StateBase::buildFieldKey(HydroFieldNames::hydroAcceleration, nodeListKey); + + const auto& m = state.field(massKey, Scalar()); + auto& v = state.field(key, Vector()); + + const auto& DmDt = derivs.field(prefix() + massKey, Scalar()); + const auto& DpDt = derivs.field(momDerivFieldKey, Vector()); + //const auto& DvDt = derivs.field(accDerivFieldKey, Vector()); + + const auto n = m.numInternalElements(); +#pragma omp parallel for + for (auto i = 0u; i < n; ++i) { + const auto m1 = m(i)+DmDt(i)*multiplier; + const auto DpDti = DpDt(i); + if (m1 > tiny) v(i) += (DpDti - DmDt(i)*v(i)) * multiplier * safeInv(m1); + } +} + +//------------------------------------------------------------------------------ +// Equivalence operator. +//------------------------------------------------------------------------------ +template +bool +MFVIncrementVelocityPolicy:: +operator==(const UpdatePolicyBase& rhs) const { + + // We're only equal if the other guy is also an replace operator. + const auto* rhsPtr = dynamic_cast*>(&rhs); + return rhsPtr != nullptr; +} + +} + diff --git a/src/GSPH/Policies/MFVIncrementVelocityPolicy.hh b/src/GSPH/Policies/MFVIncrementVelocityPolicy.hh new file mode 100644 index 000000000..979499cbf --- /dev/null +++ b/src/GSPH/Policies/MFVIncrementVelocityPolicy.hh @@ -0,0 +1,65 @@ +//---------------------------------Spheral++----------------------------------// +// MFVIncrementVelocityPolicy -- specialized policy for hydros that allow for mass +// flux between nodes. The momentum time derivative +// is used to update the velocity. The "hydro acceleration" +// is also added in to be compatible w/ phys packages +// that apply a pure acceleration. +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// +// TODO : HydroAcceleration needs to be added in +//----------------------------------------------------------------------------// + +#ifndef __Spheral_MFVIncrementVelocityPolicy_hh__ +#define __Spheral_MFVIncrementVelocityPolicy_hh__ + +#include "DataBase/FieldUpdatePolicy.hh" + +#include + +namespace Spheral { + +template +class MFVIncrementVelocityPolicy: public FieldUpdatePolicy { +public: + + //--------------------------- Public Interface ---------------------------// + // Useful typedefs + using Scalar = typename Dimension::Scalar; + using Vector = typename Dimension::Vector; + using KeyType = typename FieldUpdatePolicy::KeyType; + + // Constructors, destructor. + MFVIncrementVelocityPolicy(std::initializer_list depends={}); + ~MFVIncrementVelocityPolicy(); + + // Overload the methods describing how to update FieldLists. 
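// (Reviewer note -- not part of this patch.) As implemented in the .cc above,
// update() advances the velocity from the momentum time derivative while
// accounting for mass flux: with m1 = m + DmDt*dt,
//   v += (DpDt - DmDt*v)*dt/m1   <=>   m1*v^{new} = m*v^{old} + DpDt*dt,
// so node momentum is advanced consistently even as node masses change.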
+ virtual void update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // Equivalence. + virtual bool operator==(const UpdatePolicyBase& rhs) const override; + + static const std::string prefix() { return "delta "; } + +private: + + //--------------------------- Private Interface ---------------------------// + MFVIncrementVelocityPolicy(const MFVIncrementVelocityPolicy& rhs); + MFVIncrementVelocityPolicy& operator=(const MFVIncrementVelocityPolicy& rhs); +}; + +} + +#else + +// Forward declaration. +namespace Spheral { + template class MFVIncrementVelocityPolicy; +} + +#endif diff --git a/src/GSPH/Policies/MFVIncrementVelocityPolicyInst.cc.py b/src/GSPH/Policies/MFVIncrementVelocityPolicyInst.cc.py new file mode 100644 index 000000000..76a744576 --- /dev/null +++ b/src/GSPH/Policies/MFVIncrementVelocityPolicyInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "Geometry/Dimension.hh" +#include "GSPH/Policies/MFVIncrementVelocityPolicy.cc" + +namespace Spheral { + template class MFVIncrementVelocityPolicy>; +} +""" diff --git a/src/GSPH/Policies/MassFluxPolicy.cc b/src/GSPH/Policies/MassFluxPolicy.cc new file mode 100644 index 000000000..448be74e1 --- /dev/null +++ b/src/GSPH/Policies/MassFluxPolicy.cc @@ -0,0 +1,73 @@ +//---------------------------------Spheral++----------------------------------// +// MassFluxPolicy -- update method for ALE - based hydro schemes that allow +// for mass flux between nodes. +// +// J. M. Pearl 2023 +//----------------------------------------------------------------------------// + +#include "MassFluxPolicy.hh" +#include "Hydro/HydroFieldNames.hh" +#include "DataBase/IncrementState.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" +#include "Field/Field.hh" +#include "Utilities/DBC.hh" + +namespace Spheral { + + +//------------------------------------------------------------------------------ +// Constructor. +//------------------------------------------------------------------------------ +template +MassFluxPolicy:: +MassFluxPolicy(std::initializer_list depends): + IncrementState(depends) { +} + +//------------------------------------------------------------------------------ +// Destructor. +//------------------------------------------------------------------------------ +template +MassFluxPolicy:: +~MassFluxPolicy() { +} + +//------------------------------------------------------------------------------ +// Update the field. +//------------------------------------------------------------------------------ +template +void +MassFluxPolicy:: +update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double /*t*/, + const double /*dt*/) { + + auto& m = state.field(key, 0.0); + const auto& dmdt = derivs.field(this->prefix() + key, 0.0); + + const auto n = m.numInternalElements(); +#pragma omp parallel for + for (auto i = 0u; i < n; ++i) { + m(i) += multiplier*(dmdt(i)); + } +} + +//------------------------------------------------------------------------------ +// Equivalence operator. 
+//------------------------------------------------------------------------------ +template +bool +MassFluxPolicy:: +operator==(const UpdatePolicyBase& rhs) const { + + // We're only equal if the other guy is also an increment operator. + const auto* rhsPtr = dynamic_cast*>(&rhs); + return rhsPtr != nullptr; +} + +} + diff --git a/src/GSPH/Policies/MassFluxPolicy.hh b/src/GSPH/Policies/MassFluxPolicy.hh new file mode 100644 index 000000000..5cf4a3ce3 --- /dev/null +++ b/src/GSPH/Policies/MassFluxPolicy.hh @@ -0,0 +1,63 @@ +//---------------------------------Spheral++----------------------------------// +// MassFluxPolicy -- update method for ALE - based hydro schemes that allow +// for mass flux between nodes. +// +// J. M. Pearl 2023 +//----------------------------------------------------------------------------// + +#ifndef __Spheral_MassFluxPolicy_hh__ +#define __Spheral_MassFluxPolicy_hh__ + +#include "DataBase/IncrementState.hh" + +#include + +namespace Spheral { + +// Forward declarations. +template class State; +template class StateDerivatives; +template class FluidNodeList; +template class FieldList; + +template +class MassFluxPolicy: + public IncrementState { +public: + //--------------------------- Public Interface ---------------------------// + // Useful typedefs + using Scalar = typename Dimension::Scalar; + using Vector = typename Dimension::Vector; + using KeyType = typename IncrementState::KeyType; + + // Constructors, destructor. + MassFluxPolicy(std::initializer_list depends = {}); + virtual ~MassFluxPolicy(); + + // Overload the methods describing how to update Fields. + virtual void update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // Equivalence. + virtual bool operator==(const UpdatePolicyBase& rhs) const override; + +private: + //--------------------------- Private Interface ---------------------------// + MassFluxPolicy(const MassFluxPolicy& rhs); + MassFluxPolicy& operator=(const MassFluxPolicy& rhs); +}; + +} + +#else + +// Forward declaration. +namespace Spheral { + template class MassFluxPolicy; +} + +#endif diff --git a/src/GSPH/RiemannSolvers/GHLLCInst.cc.py b/src/GSPH/Policies/MassFluxPolicyInst.cc.py similarity index 73% rename from src/GSPH/RiemannSolvers/GHLLCInst.cc.py rename to src/GSPH/Policies/MassFluxPolicyInst.cc.py index db174ede1..79a4346cb 100644 --- a/src/GSPH/RiemannSolvers/GHLLCInst.cc.py +++ b/src/GSPH/Policies/MassFluxPolicyInst.cc.py @@ -2,10 +2,10 @@ //------------------------------------------------------------------------------ // Explicit instantiation. //------------------------------------------------------------------------------ +#include "GSPH/Policies/MassFluxPolicy.cc" #include "Geometry/Dimension.hh" -#include "GSPH/RiemannSolvers/GHLLC.cc" namespace Spheral { - template class GHLLC >; + template class MassFluxPolicy >; } """ diff --git a/src/GSPH/RiemannSolvers/GHLLC.cc b/src/GSPH/RiemannSolvers/GHLLC.cc deleted file mode 100644 index e26aebcfd..000000000 --- a/src/GSPH/RiemannSolvers/GHLLC.cc +++ /dev/null @@ -1,182 +0,0 @@ -//---------------------------------Spheral++----------------------------------// -// GHLLC -- HLLC with gravitational source term -// -// J.M. 
Pearl 2021 -//----------------------------------------------------------------------------// -#include "FileIO/FileIO.hh" -#include "DataBase/DataBase.hh" -#include "DataBase/State.hh" -#include "DataBase/StateDerivatives.hh" - -#include "Field/FieldList.hh" -#include "Neighbor/ConnectivityMap.hh" - -#include "Hydro/HydroFieldNames.hh" -#include "GSPH/GSPHFieldNames.hh" - -#include "GSPH/WaveSpeeds/WaveSpeedBase.hh" -#include "GSPH/Limiters/LimiterBase.hh" -#include "GSPH/RiemannSolvers/HLLC.hh" -#include "GSPH/RiemannSolvers/GHLLC.hh" - -#include - -namespace Spheral { - -//------------------------------------------------------------------------------ -// Constructor -//------------------------------------------------------------------------------ -template -GHLLC:: -GHLLC(LimiterBase& slopeLimiter, - WaveSpeedBase& waveSpeed, - const bool linearReconstruction, - const typename Dimension::Vector gravitationalAcceleration): - HLLC(slopeLimiter, - waveSpeed, - linearReconstruction), - mGravitationalAcceleration(gravitationalAcceleration){ - -} - -//------------------------------------------------------------------------------ -// Destructor -//------------------------------------------------------------------------------ -template -GHLLC:: -~GHLLC(){} - - -//------------------------------------------------------------------------------ -// Interface State fluid hydro -//------------------------------------------------------------------------------ -// template -// void -// GHLLC:: -// interfaceState(const int i, -// const int j, -// const int nodelisti, -// const int nodelistj, -// const typename Dimension::Vector& ri, -// const typename Dimension::Vector& rj, -// const typename Dimension::Scalar& rhoi, -// const typename Dimension::Scalar& rhoj, -// const typename Dimension::Scalar& ci, -// const typename Dimension::Scalar& cj, -// const typename Dimension::Scalar& Pi, -// const typename Dimension::Scalar& Pj, -// const typename Dimension::Vector& vi, -// const typename Dimension::Vector& vj, -// typename Dimension::Scalar& Pstar, -// typename Dimension::Vector& vstar, -// typename Dimension::Scalar& rhostari, -// typename Dimension::Scalar& rhostarj) const{ - -// // pressure + linear grav contribution -// const auto rhogh = 0.5*(rhoi+rhoj)*mGravitationalAcceleration.dot(ri-rj); -// const auto p1i = Pi - rhogh; -// const auto p1j = Pj + rhogh; -// HLLC::interfaceState(i, -// j, -// nodelisti, -// nodelistj, -// ri, -// rj, -// rhoi, -// rhoj, -// ci, -// cj, -// p1i, -// p1j, -// vi, -// vj, -// Pstar, -// vstar, -// rhostari, -// rhostarj); -// }// Scalar interface class - - -template -void -GHLLC:: -interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const typename Dimension::Vector& ri, - const typename Dimension::Vector& rj, - const typename Dimension::Scalar& rhoi, - const typename Dimension::Scalar& rhoj, - const typename Dimension::Scalar& ci, - const typename Dimension::Scalar& cj, - const typename Dimension::Scalar& Pi, - const typename Dimension::Scalar& Pj, - const typename Dimension::Vector& vi, - const typename Dimension::Vector& vj, - const typename Dimension::Vector& DpDxi, - const typename Dimension::Vector& DpDxj, - const typename Dimension::Tensor& DvDxi, - const typename Dimension::Tensor& DvDxj, - typename Dimension::Scalar& Pstar, - typename Dimension::Vector& vstar, - typename Dimension::Scalar& rhostari, - typename Dimension::Scalar& rhostarj) const{ - - // pressure + linear grav contribution - const auto rhogh = 
0.5*(rhoi+rhoj)*mGravitationalAcceleration.dot(ri-rj); - const auto p1i = Pi - rhogh; - const auto p1j = Pj + rhogh; - HLLC::interfaceState(i, - j, - nodelisti, - nodelistj, - ri, - rj, - rhoi, - rhoj, - ci, - cj, - p1i, - p1j, - vi, - vj, - DpDxi, - DpDxj, - DvDxi, - DvDxj, - Pstar, - vstar, - rhostari, - rhostarj); -}// Scalar interface class - -template -void -GHLLC:: -interfaceState(const int /*i*/, - const int /*j*/, - const int /*nodelisti*/, - const int /*nodelistj*/, - const Vector& /*ri*/, - const Vector& /*rj*/, - const Scalar& /*rhoi*/, - const Scalar& /*rhoj*/, - const Scalar& /*ci*/, - const Scalar& /*cj*/, - const Scalar& /*Pi*/, - const Scalar& /*Pj*/, - const Vector& /*vi*/, - const Vector& /*vj*/, - const SymTensor& /*Si*/, - const SymTensor& /*Sj*/, - const Tensor& /*Di*/, - const Tensor& /*Dj*/, - Vector& /*Tstar*/, - Vector& /*vstar*/) const{ - - - -} - -} // spheral namespace \ No newline at end of file diff --git a/src/GSPH/RiemannSolvers/GHLLCInline.hh b/src/GSPH/RiemannSolvers/GHLLCInline.hh deleted file mode 100644 index ad222696e..000000000 --- a/src/GSPH/RiemannSolvers/GHLLCInline.hh +++ /dev/null @@ -1,21 +0,0 @@ -namespace Spheral { - - -//-------------------------------------------------------- -// setter/getter for the gravitational acceleration -//-------------------------------------------------------- -template -typename Dimension::Vector -GHLLC:: -gravitationalAcceleration() const{ - return mGravitationalAcceleration; -} - -template -void -GHLLC:: -gravitationalAcceleration(const typename Dimension::Vector g) { - mGravitationalAcceleration = g; -} - -} \ No newline at end of file diff --git a/src/GSPH/RiemannSolvers/HLLC.cc b/src/GSPH/RiemannSolvers/HLLC.cc index f108e8a83..17d824199 100644 --- a/src/GSPH/RiemannSolvers/HLLC.cc +++ b/src/GSPH/RiemannSolvers/HLLC.cc @@ -1,19 +1,11 @@ //---------------------------------Spheral++----------------------------------// // HLLC -- approximate riemann solver -// Toro E.F., Spruce M., Speares W., (1994) "Restoration of the Contact Surface in -// the HLL-Riemann Solver," Shock Waves, 4:25-34 +// Toro E.F., Spruce M., Speares W., (1994) "Restoration of the Contact +// Surface in the HLL-Riemann Solver," Shock Waves, 4:25-34 // // J.M. 
Pearl 2021 //----------------------------------------------------------------------------// -#include "FileIO/FileIO.hh" -#include "DataBase/DataBase.hh" -#include "DataBase/State.hh" -#include "DataBase/StateDerivatives.hh" - -#include "Field/FieldList.hh" -#include "Neighbor/ConnectivityMap.hh" - #include "Hydro/HydroFieldNames.hh" #include "GSPH/GSPHFieldNames.hh" @@ -47,134 +39,16 @@ HLLC:: ~HLLC(){} -//------------------------------------------------------------------------------ -// Interface State scalar -//------------------------------------------------------------------------------ -// template -// void -// HLLC:: -// interfaceState(const int i, -// const int j, -// const int nodelisti, -// const int nodelistj, -// const typename Dimension::Vector& ri, -// const typename Dimension::Vector& rj, -// const typename Dimension::Scalar& rhoi, -// const typename Dimension::Scalar& rhoj, -// const typename Dimension::Scalar& ci, -// const typename Dimension::Scalar& cj, -// const typename Dimension::Scalar& Pi, -// const typename Dimension::Scalar& Pj, -// const typename Dimension::Vector& vi, -// const typename Dimension::Vector& vj, -// typename Dimension::Scalar& Pstar, -// typename Dimension::Vector& vstar, -// typename Dimension::Scalar& /*rhostari*/, -// typename Dimension::Scalar& /*rhostarj*/) const{ - -// Scalar Si, Sj; - -// const auto tiny = std::numeric_limits::epsilon(); - -// const auto& limiter = this->limiter(); -// const auto& waveSpeedObject = this->waveSpeed(); - -// const auto& DpDx0 = this->DpDx(); -// const auto& DvDx0 = this->DvDx(); - -// const auto rij = ri - rj; -// const auto rhatij = rij.unitVector(); - -// vstar = 0.5*(vi+vj); -// Pstar = 0.5*(Pi+Pj); - -// if (ci > tiny or cj > tiny){ - - -// // default to nodal values -// auto v1i = vi; -// auto v1j = vj; - -// auto p1i = Pi; -// auto p1j = Pj; - -// // linear reconstruction -// if(this->linearReconstruction()){ - -// // gradients -// const auto DvDxi = DvDx0(nodelisti,i); -// const auto DvDxj = DvDx0(nodelistj,j); -// const auto DpDxi = DpDx0(nodelisti,i); -// const auto DpDxj = DpDx0(nodelistj,j); - -// // gradients along line of action -// if (true){ -// this->linearReconstruction(ri,rj, Pi,Pj, DpDxi,DpDxj, -// p1i,p1j); -// this->linearReconstruction(ri,rj, vi,vj, DvDxi,DvDxj, -// v1i,v1j); -// }else{ - -// const auto xij = 0.5*(rij); -// const auto Dpi = DpDxi.dot(xij); -// const auto Dpj = DpDxj.dot(xij); -// const auto Dvi = DvDxi.dot(xij); -// const auto Dvj = DvDxj.dot(xij); -// const auto Dui = Dvi.dot(rhatij); -// const auto Duj = Dvj.dot(rhatij); -// //const auto Dp0 = 0.5*(Pi-Pj); -// //const auto Du0 = 0.5*(vi-vj).dot(rhatij); -// const auto rui = Dui/(sgn(Duj)*std::max(tiny, abs(Duj))); -// const auto ruj = Duj/(sgn(Dui)*std::max(tiny, abs(Dui))); -// const auto xu = std::min(rui,ruj); -// const auto phiu = limiter.slopeLimiter(xu); - -// const auto rpi = Dpi/(sgn(Dpj)*std::max(tiny, abs(Dpj))); -// const auto rpj = Dpj/(sgn(Dpi)*std::max(tiny, abs(Dpi))); -// const auto xp = std::min(rpi,rpj); -// const auto phip = limiter.slopeLimiter(xp); - -// v1i = vi - phiu * Dvi; -// v1j = vj + phiu * Dvj; -// p1i = Pi - phip * Dpi; -// p1j = Pj + phip * Dpj; -// } - -// } - -// const auto ui = v1i.dot(rhatij); -// const auto uj = v1j.dot(rhatij); -// const auto wi = v1i - ui*rhatij; -// const auto wj = v1j - uj*rhatij; - -// waveSpeedObject.waveSpeed(rhoi,rhoj,ci,cj,ui,uj,Si,Sj); - -// const auto denom = safeInv(Si - Sj); - -// const auto ustar = (Si*ui - Sj*uj - p1i + p1j )*denom; -// const auto 
wstar = (Si*wi - Sj*wj)*denom; -// vstar = ustar*rhatij + wstar; -// Pstar = Sj * (ustar-uj) + p1j; - -// }else{ // if ci & cj too small punt to normal av -// const auto uij = std::min((vi-vj).dot(rhatij),0.0); -// Pstar += 0.25 * (rhoi+rhoj) * (uij*uij); -// } -// }// Scalar interface class - - //------------------------------------------------------------------------------ // Interface State fluid hydro //------------------------------------------------------------------------------ template void HLLC:: -interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const typename Dimension::Vector& ri, +interfaceState(const typename Dimension::Vector& ri, const typename Dimension::Vector& rj, + const typename Dimension::SymTensor& Hi, + const typename Dimension::SymTensor& Hj, const typename Dimension::Scalar& rhoi, const typename Dimension::Scalar& rhoj, const typename Dimension::Scalar& ci, @@ -183,14 +57,16 @@ interfaceState(const int i, const typename Dimension::Scalar& Pj, const typename Dimension::Vector& vi, const typename Dimension::Vector& vj, + const typename Dimension::Vector& DrhoDxi, + const typename Dimension::Vector& DrhoDxj, const typename Dimension::Vector& DpDxi, const typename Dimension::Vector& DpDxj, const typename Dimension::Tensor& DvDxi, const typename Dimension::Tensor& DvDxj, typename Dimension::Scalar& Pstar, typename Dimension::Vector& vstar, - typename Dimension::Scalar& /*rhostari*/, - typename Dimension::Scalar& /*rhostarj*/) const{ + typename Dimension::Scalar& rhostari, + typename Dimension::Scalar& rhostarj) const{ Scalar Si, Sj; @@ -203,6 +79,8 @@ interfaceState(const int i, vstar = 0.5*(vi+vj); Pstar = 0.5*(Pi+Pj); + rhostari = rhoi; + rhostarj = rhoj; if (ci > tiny or cj > tiny){ @@ -214,14 +92,19 @@ interfaceState(const int i, auto p1i = Pi; auto p1j = Pj; + //auto rho1i = rhoi; + //auto rho1j = rhoj; + // linear reconstruction if(this->linearReconstruction()){ // gradients along line of action - this->linearReconstruction(ri,rj, Pi,Pj, DpDxi,DpDxj, - p1i,p1j); - this->linearReconstruction(ri,rj, vi,vj, DvDxi,DvDxj, - v1i,v1j); + //this->linearReconstruction(ri,rj, rhoi,rhoj,DrhoDxi,DrhoDxj, //inputs + // rho1i,rho1j); //outputs + this->linearReconstruction(ri,rj, Pi,Pj,DpDxi,DpDxj, //inputs + p1i,p1j); //outputs + this->linearReconstruction(ri,rj, vi,vj,DvDxi,DvDxj, //inputs + v1i,v1j); //outputs } @@ -230,7 +113,8 @@ interfaceState(const int i, const auto wi = v1i - ui*rhatij; const auto wj = v1j - uj*rhatij; - waveSpeedObject.waveSpeed(rhoi,rhoj,ci,cj,ui,uj,Si,Sj); + waveSpeedObject.waveSpeed(rhoi,rhoj,ci,cj,ui,uj, //inputs + Si,Sj); //outputs const auto denom = safeInv(Si - Sj); @@ -238,23 +122,24 @@ interfaceState(const int i, const auto wstar = (Si*wi - Sj*wj)*denom; vstar = ustar*rhatij + wstar; Pstar = Sj * (ustar-uj) + p1j; + //rhostari = rho1i;// * (Si - ui)*safeInv(Si-ustar); + //rhostarj = rho1j;// * (Sj - uj)*safeInv(Sj-ustar); }else{ // if ci & cj too small punt to normal av const auto uij = std::min((vi-vj).dot(rhatij),0.0); Pstar += 0.25 * (rhoi+rhoj) * (uij*uij); } + }// Scalar interface class template void HLLC:: -interfaceState(const int /*i*/, - const int /*j*/, - const int /*nodelisti*/, - const int /*nodelistj*/, - const Vector& /*ri*/, +interfaceState(const Vector& /*ri*/, const Vector& /*rj*/, + const SymTensor& /*Hi*/, + const SymTensor& /*Hj*/, const Scalar& /*rhoi*/, const Scalar& /*rhoj*/, const Scalar& /*ci*/, diff --git a/src/GSPH/RiemannSolvers/HLLC.hh b/src/GSPH/RiemannSolvers/HLLC.hh index 
8432a2fef..a401cc8a9 100644 --- a/src/GSPH/RiemannSolvers/HLLC.hh +++ b/src/GSPH/RiemannSolvers/HLLC.hh @@ -37,34 +37,11 @@ public: ~HLLC(); - // virtual - // void interfaceState(const int i, - // const int j, - // const int nodelisti, - // const int nodelistj, - // const Vector& ri, - // const Vector& rj, - // const Scalar& rhoi, - // const Scalar& rhoj, - // const Scalar& ci, - // const Scalar& cj, - // const Scalar& sigmai, - // const Scalar& sigmaj, - // const Vector& vi, - // const Vector& vj, - // Scalar& Pstar, - // Vector& vstar, - // Scalar& rhostari, - // Scalar& rhostarj) const override; - - // ^ temporary class to wrap the above ^ virtual - void interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const Vector& ri, + void interfaceState(const Vector& ri, const Vector& rj, + const SymTensor& Hi, + const SymTensor& Hj, const Scalar& rhoi, const Scalar& rhoj, const Scalar& ci, @@ -73,6 +50,8 @@ public: const Scalar& sigmaj, const Vector& vi, const Vector& vj, + const Vector& DrhoDxi, + const Vector& DrhoDxj, const Vector& DpDxi, const Vector& DpDxj, const Tensor& DvDxi, @@ -84,12 +63,10 @@ public: virtual - void interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const Vector& ri, + void interfaceState(const Vector& ri, const Vector& rj, + const SymTensor& Hi, + const SymTensor& Hj, const Scalar& rhoi, const Scalar& rhoj, const Scalar& ci, diff --git a/src/GSPH/RiemannSolvers/RiemannSolverBase.cc b/src/GSPH/RiemannSolvers/RiemannSolverBase.cc index 4e68e2f91..6fe5490c5 100644 --- a/src/GSPH/RiemannSolvers/RiemannSolverBase.cc +++ b/src/GSPH/RiemannSolvers/RiemannSolverBase.cc @@ -35,8 +35,6 @@ RiemannSolverBase(LimiterBase& slopeLimiter, mSlopeLimiter(slopeLimiter), mWaveSpeed(waveSpeed), mLinearReconstruction(linearReconstruction){ - //mDpDx(FieldStorageType::CopyFields), - //mDvDx(FieldStorageType::CopyFields){ } //------------------------------------------------------------------------------ @@ -52,92 +50,15 @@ RiemannSolverBase:: template void RiemannSolverBase:: -initialize(const DataBase& dataBase, - const State& state, - const StateDerivatives& derivs, - typename RiemannSolverBase::ConstBoundaryIterator boundaryBegin, - typename RiemannSolverBase::ConstBoundaryIterator boundaryEnd, +initialize(const DataBase& /*dataBase*/, + const State& /*state*/, + const StateDerivatives& /*derivs*/, + typename RiemannSolverBase::ConstBoundaryIterator /*boundaryBegin*/, + typename RiemannSolverBase::ConstBoundaryIterator /*boundaryEnd*/, const typename Dimension::Scalar /*time*/, const typename Dimension::Scalar /*dt*/, const TableKernel& /*W*/){ - // if(mLinearReconstruction){ - // dataBase.resizeFluidFieldList(mDpDx,Vector::zero,GSPHFieldNames::RiemannPressureGradient0,true); - // dataBase.resizeFluidFieldList(mDvDx,Tensor::zero,GSPHFieldNames::RiemannVelocityGradient0,true); - - // //const auto& DpDx0 = derivs.fields( GSPHFieldNames::pressureGradient, Vector::zero); - // //const auto& DpDxRaw0 = derivs.fields( GSPHFieldNames::pressureGradient+"RAW", Vector::zero); - // const auto& DvDx0 = derivs.fields( HydroFieldNames::velocityGradient,Tensor::zero); - // //const auto& localDvDx0 = derivs.fields( HydroFieldNames::internalVelocityGradient,Tensor::zero); - // //const auto& DvDxRaw0 = derivs.fields( HydroFieldNames::velocityGradient+"RAW",Tensor::zero); - // const auto& DvDt0 = derivs.fields(HydroFieldNames::hydroAcceleration, Vector::zero); - // const auto& rho0 = state.fields(HydroFieldNames::massDensity, 
0.0); - - // const auto& connectivityMap = dataBase.connectivityMap(); - // const auto& nodeLists = connectivityMap.nodeLists(); - // const auto numNodeLists = nodeLists.size(); - - // // copy from previous time step - // for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { - // const auto& nodeList = nodeLists[nodeListi]; - // const auto ni = nodeList->numInternalNodes(); - // #pragma omp parallel for - // for (auto i = 0u; i < ni; ++i) { - // //const auto localDvDxi = localDvDx0(nodeListi,i); - // const auto DvDxi = DvDx0(nodeListi,i); - // //const auto DvDxRawi = DvDxRaw0(nodeListi,i); - // //const auto DpDxi = DpDx0(nodeListi,i); - // //const auto DpDxRawi = DpDxRaw0(nodeListi,i); - // const auto DvDti = DvDt0(nodeListi,i); - // const auto rhoi = rho0(nodeListi,i); - - // // this'll need some cleaning - // // switch(mGradientType){ - // // case GradientType::RiemannGradient: // default grad based on riemann soln - // mDvDx(nodeListi,i) = DvDxi; - // mDpDx(nodeListi,i) = -rhoi*DvDti; - // // break; - // // case GradientType::HydroAccelerationGradient: // based on hydro accel for DpDx - // // mDvDx(nodeListi,i) = DvDxi; - // // mDpDx(nodeListi,i) = -rhoi*DvDti; - // // break; - // // case GradientType::SPHGradient: // raw gradients - // // mDvDx(nodeListi,i) = DvDxRawi; - // // mDpDx(nodeListi,i) = DpDxRawi; - // // break; - // // case GradientType::MixedMethodGradient: // raw gradient for P riemann gradient for v - // // mDvDx(nodeListi,i) = DvDxi; - // // mDpDx(nodeListi,i) = DpDxRawi; - // // break; - // // case GradientType::OnlyDvDxGradient: // raw gradients - // // mDvDx(nodeListi,i) = DvDxi; - // // mDpDx(nodeListi,i) = Vector::zero; - // // break; - // // case GradientType::LocalDvDxGradient: // local velocity gradient - // // mDvDx(nodeListi,i) = localDvDxi; - // // mDpDx(nodeListi,i) = DpDxi; - // // break; - // // default : - // // mDvDx(nodeListi,i) = Tensor::zero; - // // mDpDx(nodeListi,i) = Vector::zero; - - // // } - // } - // } - - // for (auto boundItr = boundaryBegin; - // boundItr != boundaryEnd; - // ++boundItr) { - // (*boundItr)->applyFieldListGhostBoundary(mDpDx); - // (*boundItr)->applyFieldListGhostBoundary(mDvDx); - // } - - // for (auto boundItr = boundaryBegin; - // boundItr != boundaryEnd; - // ++boundItr) (*boundItr)->finalizeGhostBoundary(); - - // } // if LinearReconstruction - } // initialize method @@ -227,39 +148,15 @@ linearReconstruction(const typename Dimension::Vector& ri, //------------------------------------------------------------------------------ // default to non-op //------------------------------------------------------------------------------ -// template -// void -// RiemannSolverBase:: -// interfaceState(const int /*i*/, -// const int /*j*/, -// const int /*nodelisti*/, -// const int /*nodelistj*/, -// const Vector& /*ri*/, -// const Vector& /*rj*/, -// const Scalar& /*rhoi*/, -// const Scalar& /*rhoj*/, -// const Scalar& /*ci*/, -// const Scalar& /*cj*/, -// const Scalar& /*sigmai*/, -// const Scalar& /*sigmaj*/, -// const Vector& /*vi*/, -// const Vector& /*vj*/, -// Scalar& /*Pstar*/, -// Vector& /*vstar*/, -// Scalar& /*rhostari*/, -// Scalar& /*rhostarj*/) const{ -// } template void RiemannSolverBase:: -interfaceState(const int /*i*/, - const int /*j*/, - const int /*nodelisti*/, - const int /*nodelistj*/, - const Vector& /*ri*/, +interfaceState(const Vector& /*ri*/, const Vector& /*rj*/, + const SymTensor& /*Hi*/, + const SymTensor& /*Hj*/, const Scalar& /*rhoi*/, const Scalar& /*rhoj*/, const Scalar& /*ci*/, @@ 
-268,6 +165,8 @@ interfaceState(const int /*i*/, const Scalar& /*Pj*/, const Vector& /*vi*/, const Vector& /*vj*/, + const Vector& /*DrhoDxi*/, + const Vector& /*DrhoDxj*/, const Vector& /*DpDxi*/, const Vector& /*DpDxj*/, const Tensor& /*DvDxi*/, @@ -285,12 +184,10 @@ interfaceState(const int /*i*/, template void RiemannSolverBase:: -interfaceState(const int /*i*/, - const int /*j*/, - const int /*nodelisti*/, - const int /*nodelistj*/, - const Vector& /*ri*/, +interfaceState(const Vector& /*ri*/, const Vector& /*rj*/, + const SymTensor& /*Hi*/, + const SymTensor& /*Hj*/, const Scalar& /*rhoi*/, const Scalar& /*rhoj*/, const Scalar& /*ci*/, diff --git a/src/GSPH/RiemannSolvers/RiemannSolverBase.hh b/src/GSPH/RiemannSolvers/RiemannSolverBase.hh index fbdd996d2..e5aacffdf 100644 --- a/src/GSPH/RiemannSolvers/RiemannSolverBase.hh +++ b/src/GSPH/RiemannSolvers/RiemannSolverBase.hh @@ -46,33 +46,12 @@ public: const Scalar dt, const TableKernel& W); - // virtual - // void interfaceState(const int i, - // const int j, - // const int nodelisti, - // const int nodelistj, - // const Vector& ri, - // const Vector& rj, - // const Scalar& rhoi, - // const Scalar& rhoj, - // const Scalar& ci, - // const Scalar& cj, - // const Scalar& Pi, - // const Scalar& Pj, - // const Vector& vi, - // const Vector& vj, - // Scalar& Pstar, - // Vector& vstar, - // Scalar& rhostari, - // Scalar& rhostarj) const; virtual - void interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const Vector& ri, + void interfaceState(const Vector& ri, const Vector& rj, + const SymTensor& Hi, + const SymTensor& Hj, const Scalar& rhoi, const Scalar& rhoj, const Scalar& ci, @@ -81,6 +60,8 @@ public: const Scalar& Pj, const Vector& vi, const Vector& vj, + const Vector& DrhoDxi, + const Vector& DrhoDxj, const Vector& DpDxi, const Vector& DpDxj, const Tensor& DvDxi, @@ -92,12 +73,10 @@ public: virtual - void interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const Vector& ri, + void interfaceState(const Vector& ri, const Vector& rj, + const SymTensor& Hi, + const SymTensor& Hj, const Scalar& rhoi, const Scalar& rhoj, const Scalar& ci, @@ -118,9 +97,6 @@ public: bool linearReconstruction() const; void linearReconstruction(bool x); - - // GradientType gradientType() const; - // void gradientType(GradientType x); virtual void linearReconstruction(const Vector& ri, @@ -142,22 +118,13 @@ public: Vector& ytildei, Vector& ytildej) const; - // we'll want the ability to modify these (make better) - // FieldList& DpDx(); - // FieldList& DvDx(); - // const FieldList& DpDx() const; - // const FieldList& DvDx() const; private: LimiterBase& mSlopeLimiter; WaveSpeedBase& mWaveSpeed; bool mLinearReconstruction; - //GradientType mGradientType; - - // FieldList mDpDx; - // FieldList mDvDx; }; diff --git a/src/GSPH/RiemannSolvers/RiemannSolverBaseInline.hh b/src/GSPH/RiemannSolvers/RiemannSolverBaseInline.hh index 6df4c6f3a..d04dec1d2 100644 --- a/src/GSPH/RiemannSolvers/RiemannSolverBaseInline.hh +++ b/src/GSPH/RiemannSolvers/RiemannSolverBaseInline.hh @@ -41,44 +41,4 @@ linearReconstruction(bool x) { mLinearReconstruction=x; } - -//------------------------------------------------------------------------------ -// field getters -//------------------------------------------------------------------------------ - - -// template -// inline -// FieldList& -// RiemannSolverBase:: -// DpDx() { -// return mDpDx; -// } - -// template -// inline -// FieldList& -// RiemannSolverBase:: -// 
DvDx() { -// return mDvDx; -// } - - -// template -// inline -// const FieldList& -// RiemannSolverBase:: -// DpDx() const { -// return mDpDx; -// } - -// template -// inline -// const FieldList& -// RiemannSolverBase:: -// DvDx() const { -// return mDvDx; -// } - - } \ No newline at end of file diff --git a/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosity.cc b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosity.cc new file mode 100644 index 000000000..a67dbfd26 --- /dev/null +++ b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosity.cc @@ -0,0 +1,135 @@ +//---------------------------------Spheral++----------------------------------// +// SecondOrderArtificialViscosity +// Frontiere, Raskin, Owen (2017) "CRKSPH:- A Conservative Reproducing Kernel +// Smoothed Particle Hydrodynamics Scheme," J. Comp. Phys. +// +// This is a reimplementation of the LimitedArtificialViscosity class as a +// derivative of RiemannSolverBase so it can be used with GSPH derived +// classes +// +// J.M. Pearl 2021 +//----------------------------------------------------------------------------// + +#include "Hydro/HydroFieldNames.hh" +#include "GSPH/GSPHFieldNames.hh" + +#include "GSPH/WaveSpeeds/WaveSpeedBase.hh" +#include "GSPH/Limiters/LimiterBase.hh" +#include "GSPH/RiemannSolvers/SecondOrderArtificialViscosity.hh" + +#include + +namespace Spheral { + +//------------------------------------------------------------------------------ +// Constructor +//------------------------------------------------------------------------------ +template +SecondOrderArtificialViscosity:: +SecondOrderArtificialViscosity(const Scalar Cl, + const Scalar Cq, + LimiterBase& slopeLimiter, + WaveSpeedBase& waveSpeed, + const bool linearReconstruction): + RiemannSolverBase(slopeLimiter, + waveSpeed, + linearReconstruction), + mCl(Cl), + mCq(Cq){ + +} + +//------------------------------------------------------------------------------ +// Destructor +//------------------------------------------------------------------------------ +template +SecondOrderArtificialViscosity:: +~SecondOrderArtificialViscosity(){} + + +//------------------------------------------------------------------------------ +// Interface State fluid hydro +//------------------------------------------------------------------------------ +template +void +SecondOrderArtificialViscosity:: +interfaceState(const typename Dimension::Vector& ri, + const typename Dimension::Vector& rj, + const typename Dimension::SymTensor& Hi, + const typename Dimension::SymTensor& Hj, + const typename Dimension::Scalar& rhoi, + const typename Dimension::Scalar& rhoj, + const typename Dimension::Scalar& ci, + const typename Dimension::Scalar& cj, + const typename Dimension::Scalar& Pi, + const typename Dimension::Scalar& Pj, + const typename Dimension::Vector& vi, + const typename Dimension::Vector& vj, + const typename Dimension::Vector& /*DrhoDxi*/, + const typename Dimension::Vector& /*DrhoDxj*/, + const typename Dimension::Vector& /*DpDxi*/, + const typename Dimension::Vector& /*DpDxj*/, + const typename Dimension::Tensor& DvDxi, + const typename Dimension::Tensor& DvDxj, + typename Dimension::Scalar& Pstar, + typename Dimension::Vector& vstar, + typename Dimension::Scalar& /*rhostari*/, + typename Dimension::Scalar& /*rhostarj*/) const{ + + + const auto tiny = std::numeric_limits::epsilon(); + + const Vector rij = ri - rj; + const Vector rhatij = rij.unitVector(); + const Vector etaij = 0.5*(Hi+Hj)*rij; + + // default to nodal values + Vector v1i = vi; + Vector v1j = vj; + + 
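// (Reviewer sketch -- not part of this patch; restating the steps that follow.)
// The endpoint velocities are optionally limiter-reconstructed to the midpoint,
// then a Monaghan-type pairwise viscous pressure is assembled:
//   muij  = max(0, -vij . etaij / (|etaij|^2 + tiny)),
//   Pstar = 0.5*(Pi + Pj) + rhoij*muij*(Cl*cij + Cq*muij),
// with rhoij = 2*rhoi*rhoj/(rhoi + rhoj) (the harmonic-mean density) and
// cij = 0.5*(ci + cj); vstar is simply the arithmetic mean of vi and vj, and
// the density outputs are left untouched.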
// linear reconstruction + if(this->linearReconstruction()){ + + this->linearReconstruction(ri,rj, vi,vj,DvDxi,DvDxj, //inputs + v1i,v1j); //outputs + + } + const Vector vij = v1i-v1j; + const Scalar muij = std::max(0.0,-vij.dot(etaij)/(etaij.magnitude2() + tiny)); + const Scalar cij = 0.5*(ci+cj); + const Scalar rhoij = 2*rhoi*rhoj/(rhoi+rhoj); + Pstar = 0.5*(Pi+Pj) + + rhoij*muij*(this->Cl()*cij + +this->Cq()*muij); + vstar = 0.5*(vi+vj); + +}// Scalar interface class + + +template +void +SecondOrderArtificialViscosity:: +interfaceState(const Vector& /*ri*/, + const Vector& /*rj*/, + const SymTensor& /*Hi*/, + const SymTensor& /*Hj*/, + const Scalar& /*rhoi*/, + const Scalar& /*rhoj*/, + const Scalar& /*ci*/, + const Scalar& /*cj*/, + const Scalar& /*Pi*/, + const Scalar& /*Pj*/, + const Vector& /*vi*/, + const Vector& /*vj*/, + const SymTensor& /*Si*/, + const SymTensor& /*Sj*/, + const Tensor& /*Di*/, + const Tensor& /*Dj*/, + Vector& /*Tstar*/, + Vector& /*vstar*/) const{ + + + +} + +} // spheral namespace \ No newline at end of file diff --git a/src/GSPH/RiemannSolvers/GHLLC.hh b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosity.hh similarity index 56% rename from src/GSPH/RiemannSolvers/GHLLC.hh rename to src/GSPH/RiemannSolvers/SecondOrderArtificialViscosity.hh index 951233334..3b7d91a0b 100644 --- a/src/GSPH/RiemannSolvers/GHLLC.hh +++ b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosity.hh @@ -1,10 +1,19 @@ //---------------------------------Spheral++----------------------------------// -// GHLLC -- HLLC solver w/ gravitational source term +// SecondOrderArtificialViscosity +// Frontiere, Raskin, Owen (2017) "CRKSPH:- A Conservative Reproducing Kernel +// Smoothed Particle Hydrodynamics Scheme," J. Comp. Phys. +// +// This is a reimplementation of the LimitedArtificialViscosity class as a +// derivative of RiemannSolverBase so it can be used with GSPH derived +// classes +// +// J.M. 
Pearl 2021 //----------------------------------------------------------------------------// -#ifndef __Spheral_GHLLC_hh__ -#define __Spheral_GHLLC_hh__ -#include "HLLC.hh" +#ifndef __Spheral_SecondOrderArtificialViscosity_hh__ +#define __Spheral_SecondOrderArtificialViscosity_hh__ + +#include "RiemannSolverBase.hh" namespace Spheral { @@ -17,7 +26,7 @@ template class Field; template class FieldList; template -class GHLLC : public HLLC { +class SecondOrderArtificialViscosity : public RiemannSolverBase { typedef typename Dimension::Scalar Scalar; typedef typename Dimension::Vector Vector; @@ -26,40 +35,20 @@ class GHLLC : public HLLC { public: - GHLLC(LimiterBase& slopeLimiter, - WaveSpeedBase& waveSpeedBase, - const bool linearReconstruction, - const Vector gravitationalAcceleration); - - ~GHLLC(); - - // virtual - // void interfaceState(const int i, - // const int j, - // const int nodelisti, - // const int nodelistj, - // const Vector& ri, - // const Vector& rj, - // const Scalar& rhoi, - // const Scalar& rhoj, - // const Scalar& ci, - // const Scalar& cj, - // const Scalar& sigmai, - // const Scalar& sigmaj, - // const Vector& vi, - // const Vector& vj, - // Scalar& Pstar, - // Vector& vstar, - // Scalar& rhostari, - // Scalar& rhostarj) const override; + SecondOrderArtificialViscosity(const Scalar Cl, + const Scalar Cq, + LimiterBase& slopeLimiter, + WaveSpeedBase& waveSpeedBase, + const bool linearReconstruction); + + ~SecondOrderArtificialViscosity(); + virtual - void interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const Vector& ri, + void interfaceState(const Vector& ri, const Vector& rj, + const SymTensor& Hi, + const SymTensor& Hj, const Scalar& rhoi, const Scalar& rhoj, const Scalar& ci, @@ -68,6 +57,8 @@ public: const Scalar& sigmaj, const Vector& vi, const Vector& vj, + const Vector& DrhoDxi, + const Vector& DrhoDxj, const Vector& DpDxi, const Vector& DpDxj, const Tensor& DvDxi, @@ -77,13 +68,12 @@ public: Scalar& rhostari, Scalar& rhostarj) const override; + virtual - void interfaceState(const int i, - const int j, - const int nodelisti, - const int nodelistj, - const Vector& ri, + void interfaceState(const Vector& ri, const Vector& rj, + const SymTensor& Hi, + const SymTensor& Hj, const Scalar& rhoi, const Scalar& rhoj, const Scalar& ci, @@ -99,17 +89,21 @@ public: Vector& Tstar, Vector& vstar) const override; - - void gravitationalAcceleration(const Vector g); - Vector gravitationalAcceleration() const; + Scalar Cl() const; + void Cl(const Scalar x); + + Scalar Cq() const; + void Cq(const Scalar x); private: - Vector mGravitationalAcceleration; + Scalar mCl; + Scalar mCq; + }; } -#include "GHLLCInline.hh" +#include "SecondOrderArtificialViscosityInline.hh" #endif diff --git a/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosityInline.hh b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosityInline.hh new file mode 100644 index 000000000..baa102bd7 --- /dev/null +++ b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosityInline.hh @@ -0,0 +1,40 @@ +namespace Spheral { + +//------------------------------------------------------------------------------ +// set/get linear reconstruction switch +//------------------------------------------------------------------------------ +template +inline +typename Dimension::Scalar +SecondOrderArtificialViscosity:: +Cl() const { + return mCl; +} + +template +inline +void +SecondOrderArtificialViscosity:: +Cl(typename Dimension::Scalar x) { + mCl=x; +} + + +template +inline +typename 
Dimension::Scalar +SecondOrderArtificialViscosity:: +Cq() const { + return mCq; +} + +template +inline +void +SecondOrderArtificialViscosity:: +Cq(typename Dimension::Scalar x) { + mCq=x; +} + + +} \ No newline at end of file diff --git a/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosityInst.cc.py b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosityInst.cc.py new file mode 100644 index 000000000..4aebeff1f --- /dev/null +++ b/src/GSPH/RiemannSolvers/SecondOrderArtificialViscosityInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "Geometry/Dimension.hh" +#include "GSPH/RiemannSolvers/SecondOrderArtificialViscosity.cc" + +namespace Spheral { + template class SecondOrderArtificialViscosity >; +} +""" diff --git a/src/GSPH/computeMFMDensity.cc b/src/GSPH/computeMFMDensity.cc index 41e1c0a6c..c617172aa 100644 --- a/src/GSPH/computeMFMDensity.cc +++ b/src/GSPH/computeMFMDensity.cc @@ -1,5 +1,7 @@ //---------------------------------Spheral++----------------------------------// // Compute the density from m/V +// +// J.M. Pearl 2022 //----------------------------------------------------------------------------// #include "GSPH/computeMFMDensity.hh" diff --git a/src/GSPH/computeMFMDensity.hh b/src/GSPH/computeMFMDensity.hh index ebfc8fae2..99f25822d 100644 --- a/src/GSPH/computeMFMDensity.hh +++ b/src/GSPH/computeMFMDensity.hh @@ -1,5 +1,7 @@ //---------------------------------Spheral++----------------------------------// // Compute the density from m/V +// +// J.M. Pearl 2022 //----------------------------------------------------------------------------// #ifndef __Spheral__computeMFMDensity__ diff --git a/src/GSPH/computeSPHVolume.cc b/src/GSPH/computeSPHVolume.cc index 0fba6577a..ce803c322 100644 --- a/src/GSPH/computeSPHVolume.cc +++ b/src/GSPH/computeSPHVolume.cc @@ -1,5 +1,7 @@ //---------------------------------Spheral++----------------------------------// // Compute the volume from m/rho +// +// J.M. Pearl 2022 //----------------------------------------------------------------------------// #include "GSPH/computeSPHVolume.hh" diff --git a/src/GSPH/computeSPHVolume.hh b/src/GSPH/computeSPHVolume.hh index c0c856faa..536101c43 100644 --- a/src/GSPH/computeSPHVolume.hh +++ b/src/GSPH/computeSPHVolume.hh @@ -1,5 +1,7 @@ //---------------------------------Spheral++----------------------------------// // Compute the volume from m/rho +// +// J.M. Pearl 2022 //----------------------------------------------------------------------------// #ifndef __Spheral__computeSPHVolume__ diff --git a/src/GSPH/computeSumVolume.cc b/src/GSPH/computeSumVolume.cc index 60e527051..5dd4126dc 100644 --- a/src/GSPH/computeSumVolume.cc +++ b/src/GSPH/computeSumVolume.cc @@ -1,5 +1,10 @@ //---------------------------------Spheral++----------------------------------// -// Compute volume from inverse of the kernel summation +// Compute volume from inverse of the kernel summation. +// +// Hopkins P.F. (2015) "A New Class of Accurate, Mesh-Free Hydrodynamic +// Simulation Methods," MNRAS, 450(1):53-110 +// +// J.M. 
Pearl 2022 //----------------------------------------------------------------------------// #include "GSPH/computeSumVolume.hh" diff --git a/src/GSPH/computeSumVolume.hh b/src/GSPH/computeSumVolume.hh index 1eec78308..283a3e19f 100644 --- a/src/GSPH/computeSumVolume.hh +++ b/src/GSPH/computeSumVolume.hh @@ -1,5 +1,10 @@ //---------------------------------Spheral++----------------------------------// -// Compute volume from inverse of the kernel summation +// Compute volume from inverse of the kernel summation. +// +// Hopkins P.F. (2015) "A New Class of Accurate, Mesh-Free Hydrodynamic +// Simulation Methods," MNRAS, 450(1):53-110 +// +// J.M. Pearl 2022 //----------------------------------------------------------------------------// #ifndef __Spheral__computeSumVolume__ diff --git a/src/GSPH/initializeGradients.cc b/src/GSPH/initializeGradients.cc new file mode 100644 index 000000000..baced1087 --- /dev/null +++ b/src/GSPH/initializeGradients.cc @@ -0,0 +1,155 @@ +//---------------------------------Spheral++----------------------------------// +// initializes the pressure and velocity gradients for Riemann solver - based +// SPH varients +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// + +#include "GSPH/initializeGradients.hh" +#include "Field/FieldList.hh" +#include "Neighbor/ConnectivityMap.hh" +#include "Kernel/TableKernel.hh" +#include "NodeList/NodeList.hh" + +namespace Spheral{ + +template +void +initializeGradients(const ConnectivityMap& connectivityMap, + const TableKernel& W, + const FieldList& position, + const FieldList& H, + const FieldList& volume, + const FieldList& pressure, + const FieldList& velocity, + FieldList& M, + FieldList& DpDx, + FieldList& DvDx) { + + typedef typename Dimension::Tensor Tensor; + + const auto& nodeLists = connectivityMap.nodeLists(); + const auto& pairs = connectivityMap.nodePairList(); + const auto npairs = pairs.size(); + const auto numNodeLists = nodeLists.size(); + + REQUIRE(volume.size() == numNodeLists); + REQUIRE(velocity.size() == numNodeLists); + REQUIRE(pressure.size() == numNodeLists); + REQUIRE(position.size() == numNodeLists); + REQUIRE(H.size() == numNodeLists); + + REQUIRE(DpDx.size() == numNodeLists); + REQUIRE(DvDx.size() == numNodeLists); + REQUIRE(M.size() == numNodeLists); + +#pragma omp parallel + { + // Thread private scratch variables + int i, j, nodeListi, nodeListj; + + typename SpheralThreads::FieldListStack threadStack; + auto M_thread = M.threadCopy(threadStack); + auto DpDx_thread = DpDx.threadCopy(threadStack); + auto DvDx_thread = DvDx.threadCopy(threadStack); + +#pragma omp for + for (auto kk = 0u; kk < npairs; ++kk) { + i = pairs[kk].i_node; + j = pairs[kk].j_node; + nodeListi = pairs[kk].i_list; + nodeListj = pairs[kk].j_list; + + // Get the state for node i. 
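For orientation, the pair loop that continues below accumulates the linear-correction tensor M along with uncorrected pressure and velocity gradients, and the finish-up loop then applies the correction. Reading directly off the implementation here (V the node volumes, W the interpolation kernel), the operators are roughly

\[
M_i = -\sum_j V_j\, \mathbf{r}_{ij} \otimes \nabla W_i, \qquad
\nabla P_i \approx M_i^{-T}\Big(-\sum_j V_j\,(P_i - P_j)\,\nabla W_i\Big), \qquad
\nabla \mathbf{v}_i \approx \Big(-\sum_j V_j\,(\mathbf{v}_i - \mathbf{v}_j)\otimes \nabla W_i\Big) M_i^{-1},
\]

with M_i replaced by the identity tensor whenever it is ill-conditioned or the node has too few neighbors. This is a summary of the code in this file, not a formula quoted from the referenced papers.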
+ const auto& vi = velocity(nodeListi, i); + const auto& Pi = pressure(nodeListi, i); + const auto& ri = position(nodeListi, i); + const auto& voli = volume(nodeListi, i); + const auto& Hi = H(nodeListi, i); + const auto Hdeti = Hi.Determinant(); + + CHECK(voli > 0.0); + CHECK(Hdeti > 0.0); + + auto& DpDxi = DpDx(nodeListi, i); + auto& DvDxi = DvDx(nodeListi, i); + auto& Mi = M(nodeListi, i); + + // Get the state for node j + const auto& vj = velocity(nodeListj, j); + const auto& Pj = pressure(nodeListj, j); + const auto& rj = position(nodeListj, j); + const auto& volj = volume(nodeListj, j); + const auto& Hj = H(nodeListj, j); + const auto Hdetj = Hj.Determinant(); + + CHECK(volj > 0.0); + CHECK(Hdetj > 0.0); + + auto& DpDxj = DpDx(nodeListj, j); + auto& DvDxj = DvDx(nodeListj, j); + auto& Mj = M(nodeListj, j); + + const auto rij = ri - rj; + const auto vij = vi - vj; + const auto Pij = Pi - Pj; + + const auto etai = Hi*rij; + const auto etaj = Hj*rij; + const auto etaMagi = etai.magnitude(); + const auto etaMagj = etaj.magnitude(); + + CHECK(etaMagi >= 0.0); + CHECK(etaMagj >= 0.0); + + const auto gWi = W.gradValue(etaMagi, Hdeti); + const auto Hetai = Hi*etai.unitVector(); + const auto gradWi = gWi*Hetai; + + const auto gWj = W.gradValue(etaMagj, Hdetj); + const auto Hetaj = Hj*etaj.unitVector(); + const auto gradWj = gWj*Hetaj; + + const auto gradPsii = volj*gradWi; + const auto gradPsij = voli*gradWj; + + Mi -= rij.dyad(gradPsii); + Mj -= rij.dyad(gradPsij); + + DpDxi -= Pij*gradPsii; + DpDxj -= Pij*gradPsij; + + DvDxi -= vij.dyad(gradPsii); + DvDxj -= vij.dyad(gradPsij); + + } // loop over pairs + // Reduce the thread values to the master. + threadReduceFieldLists(threadStack); + } // OpenMP parallel region + + // Finish up the spatial gradient calculation + for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { + const auto& nodeList = volume[nodeListi]->nodeList(); + const auto ni = nodeList.numInternalNodes(); +#pragma omp parallel for + for (auto i = 0u; i < ni; ++i) { + const auto numNeighborsi = connectivityMap.numNeighborsForNode(nodeListi, i); + + auto& Mi = M(nodeListi, i); + auto& DpDxi = DpDx(nodeListi, i); + auto& DvDxi = DvDx(nodeListi, i); + + const auto Mdeti = std::abs(Mi.Determinant()); + + const auto enoughNeighbors = numNeighborsi > Dimension::pownu(2); + const auto goodM = (Mdeti > 1e-2 and enoughNeighbors); + + Mi = ( goodM ? Mi.Inverse() : Tensor::one); + + DpDxi = Mi.Transpose()*DpDxi; + DvDxi = DvDxi*Mi; + + } // loop nodes + } // loop nodelists +} // function +} // spheral namespace diff --git a/src/GSPH/initializeGradients.hh b/src/GSPH/initializeGradients.hh new file mode 100644 index 000000000..8930dd563 --- /dev/null +++ b/src/GSPH/initializeGradients.hh @@ -0,0 +1,37 @@ +//---------------------------------Spheral++----------------------------------// +// initializes the pressure and velocity gradients for Riemann solver - based +// SPH varients +// +// J.M. Pearl 2023 +//----------------------------------------------------------------------------// + +#ifndef __Spheral__initializeGradients__ +#define __Spheral__initializeGradients__ + +#include + +namespace Spheral { + + // Forward declarations. 
+ template class ConnectivityMap; + template class TableKernel; + template class FieldList; + + +template +void +initializeGradients(const ConnectivityMap& connectivityMap, + const TableKernel& W, + const FieldList& position, + const FieldList& H, + const FieldList& volume, + const FieldList& pressure, + const FieldList& velocity, + FieldList& M, + FieldList& DpDx, + FieldList& DvDx); + +} + + + #endif \ No newline at end of file diff --git a/src/GSPH/initializeGradientsInst.cc.py b/src/GSPH/initializeGradientsInst.cc.py new file mode 100644 index 000000000..d27852d9d --- /dev/null +++ b/src/GSPH/initializeGradientsInst.cc.py @@ -0,0 +1,22 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "GSPH/initializeGradients.cc" +#include "Geometry/Dimension.hh" + +namespace Spheral { + + + template void initializeGradients(const ConnectivityMap >&, + const TableKernel >&, + const FieldList, Dim< %(ndim)s >::Vector>&, + const FieldList, Dim< %(ndim)s >::SymTensor>&, + const FieldList, Dim< %(ndim)s >::Scalar>&, + const FieldList, Dim< %(ndim)s >::Scalar>&, + const FieldList, Dim< %(ndim)s >::Vector>&, + FieldList, Dim< %(ndim)s >::Tensor>&, + FieldList, Dim< %(ndim)s >::Vector>&, + FieldList, Dim< %(ndim)s >::Tensor>&); +} +""" diff --git a/src/Hydro/CompatibleDifferenceSpecificThermalEnergyPolicy.cc b/src/Hydro/CompatibleDifferenceSpecificThermalEnergyPolicy.cc index 511390859..154900215 100644 --- a/src/Hydro/CompatibleDifferenceSpecificThermalEnergyPolicy.cc +++ b/src/Hydro/CompatibleDifferenceSpecificThermalEnergyPolicy.cc @@ -1,12 +1,12 @@ //---------------------------------Spheral++----------------------------------// -// CompatibleDifferenceSpecificThermalEnergyPolicy -- An implementation of UpdatePolicyBase -// specialized for the updating the specific thermal energy as a dependent -// quantity. +// CompatibleDifferenceSpecificThermalEnergyPolicy -- An implementation of +// UpdatePolicyBase specialized for the updating the specific thermal energy +// as a dependent quantity. // // This version is specialized for materials with different properties. A // compatible energy discretization in which pairwise work allows for opposite // sign pair-wise work. DepsDti and DepsDtj are used as weights and the -// difference between the conservative and consistent formulations is added +// difference between the conservative and consistent formulations is added // back in. //----------------------------------------------------------------------------// #include "Hydro/CompatibleDifferenceSpecificThermalEnergyPolicy.hh" diff --git a/src/NodeGenerators/CubicNodeGenerator.py b/src/NodeGenerators/CubicNodeGenerator.py index b7cca64ae..56edacbfb 100644 --- a/src/NodeGenerators/CubicNodeGenerator.py +++ b/src/NodeGenerators/CubicNodeGenerator.py @@ -59,9 +59,9 @@ def __init__(self, assert nxdomains * nydomains == mpi.procs # The number of nodes per domain. - nxperdomain = nx / nxdomains + nxperdomain = nx // nxdomains nxremainder = nx % nxdomains - nyperdomain = ny / nydomains + nyperdomain = ny // nydomains nyremainder = ny % nydomains assert nxremainder < nxdomains assert nyremainder < nydomains @@ -84,7 +84,7 @@ def __init__(self, # Compute our domain indicies. 
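The CubicNodeGenerator hunks here swap true division for floor division: under Python 3, "/" on two integers returns a float, while the per-domain node counts and the rank decomposition just below must stay integral. A minimal illustration with made-up values:

    # Illustrative only (hypothetical nx, nxdomains, rank values):
    nx, nxdomains = 100, 8
    nxperdomain = nx // nxdomains   # 12, an int, as the generator now computes
    nxremainder = nx % nxdomains    # 4, handled separately
    rank = 13
    ixdomain = rank % nxdomains     # 5
    iydomain = rank // nxdomains    # 1 (rank / nxdomains would be a float in Python 3)
    assert isinstance(nxperdomain, int) and isinstance(iydomain, int)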
ixdomain = mpi.rank % nxdomains - iydomain = mpi.rank / nxdomains + iydomain = mpi.rank // nxdomains ixmin = nodeindex(ixdomain, nxperdomain, nxremainder) ixmax = nodeindex(ixdomain + 1, nxperdomain, nxremainder) iymin = nodeindex(iydomain, nyperdomain, nyremainder) diff --git a/src/PYB11/ANEOS/CMakeLists.txt b/src/PYB11/ANEOS/CMakeLists.txt index fb3653ffb..3928d40ca 100644 --- a/src/PYB11/ANEOS/CMakeLists.txt +++ b/src/PYB11/ANEOS/CMakeLists.txt @@ -1,2 +1,2 @@ -spheral_add_pybind11_library(ANEOS - INCLUDES ${SPHERAL_ROOT_DIR}/src/Pybind11Wraps/ANEOS) +spheral_add_pybind11_library(ANEOS SPHERAL_MODULE_LIST + INCLUDES ${SPHERAL_ROOT_DIR}/src/Pybind11Wraps/ANEOS SPHERAL_MODULE_LIST) diff --git a/src/PYB11/ArtificialConduction/CMakeLists.txt b/src/PYB11/ArtificialConduction/CMakeLists.txt index 5c44d52ed..f84b877e9 100644 --- a/src/PYB11/ArtificialConduction/CMakeLists.txt +++ b/src/PYB11/ArtificialConduction/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(ArtificialConduction) +spheral_add_pybind11_library(ArtificialConduction SPHERAL_MODULE_LIST) diff --git a/src/PYB11/ArtificialViscosity/CMakeLists.txt b/src/PYB11/ArtificialViscosity/CMakeLists.txt index bd55c1a13..61c6cb737 100644 --- a/src/PYB11/ArtificialViscosity/CMakeLists.txt +++ b/src/PYB11/ArtificialViscosity/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(ArtificialViscosity) +spheral_add_pybind11_library(ArtificialViscosity SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Boundary/CMakeLists.txt b/src/PYB11/Boundary/CMakeLists.txt index 7cbc49954..0e58f9f04 100644 --- a/src/PYB11/Boundary/CMakeLists.txt +++ b/src/PYB11/Boundary/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Boundary) +spheral_add_pybind11_library(Boundary SPHERAL_MODULE_LIST) diff --git a/src/PYB11/CMakeLists.txt b/src/PYB11/CMakeLists.txt index 62349e47d..5f315c582 100644 --- a/src/PYB11/CMakeLists.txt +++ b/src/PYB11/CMakeLists.txt @@ -1,3 +1,4 @@ +# When Python targets are created, they add the module name to SPHERAL_MODULE_LIST set (_python_packages CXXTypes CXXTests @@ -65,7 +66,9 @@ foreach(_python_package ${_python_packages}) add_subdirectory(${_python_package}) endforeach() -string(REPLACE ";" " " PYTHONPKGS "${_python_packages}") +# This global list is filled in each spheral_add_pybind11_library call +get_property(SPHERAL_MODULE_LIST GLOBAL PROPERTY SPHERAL_MODULE_LIST) +string(REPLACE ";" " " PYTHONPKGS "${SPHERAL_MODULE_LIST}") configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/SpheralCompiledPackages.py.in ${CMAKE_CURRENT_BINARY_DIR}/SpheralCompiledPackages.py) @@ -76,10 +79,7 @@ set(PYB11_python_targets ${CMAKE_CURRENT_BINARY_DIR}/SpheralCompiledPackages.py ) -install(FILES ${PYB11_python_targets} - DESTINATION Spheral - ) +spheral_install_python_files(${PYB11_python_targets}) install(FILES Spheral.pth - DESTINATION . 
- ) + DESTINATION ${SPHERAL_SITE_PACKAGES_PATH}) diff --git a/src/PYB11/CRKSPH/CMakeLists.txt b/src/PYB11/CRKSPH/CMakeLists.txt index 95da6e17b..cbf094303 100644 --- a/src/PYB11/CRKSPH/CMakeLists.txt +++ b/src/PYB11/CRKSPH/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(CRKSPH) +spheral_add_pybind11_library(CRKSPH SPHERAL_MODULE_LIST) diff --git a/src/PYB11/CXXTests/CMakeLists.txt b/src/PYB11/CXXTests/CMakeLists.txt index 0f0e06c14..0fb9f3d46 100644 --- a/src/PYB11/CXXTests/CMakeLists.txt +++ b/src/PYB11/CXXTests/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(CXXTests) +spheral_add_pybind11_library(CXXTests SPHERAL_MODULE_LIST) diff --git a/src/PYB11/CXXTypes/CMakeLists.txt b/src/PYB11/CXXTypes/CMakeLists.txt index f2da69bcc..7cd5128e3 100644 --- a/src/PYB11/CXXTypes/CMakeLists.txt +++ b/src/PYB11/CXXTypes/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(CXXTypes) +spheral_add_pybind11_library(CXXTypes SPHERAL_MODULE_LIST) diff --git a/src/PYB11/DEM/CMakeLists.txt b/src/PYB11/DEM/CMakeLists.txt index f3bca78b6..4ea9b8d57 100644 --- a/src/PYB11/DEM/CMakeLists.txt +++ b/src/PYB11/DEM/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(DEM) +spheral_add_pybind11_library(DEM SPHERAL_MODULE_LIST) diff --git a/src/PYB11/DEM/DEMBase.py b/src/PYB11/DEM/DEMBase.py index 0627717eb..3cc312c69 100644 --- a/src/PYB11/DEM/DEMBase.py +++ b/src/PYB11/DEM/DEMBase.py @@ -58,8 +58,7 @@ def initialize(time = "const Scalar", state = "State<%(Dimension)s>&", derivs = "StateDerivatives<%(Dimension)s>&"): "Initialize the DEM before we start a derivative evaluation." - return "void" - + return "void" @PYB11virtual @PYB11const @@ -104,20 +103,14 @@ def clearSolidBoundaries(self): "remove all solid boundaries from the dem package" return "void" - @PYB11const - def numSolidBoundaries(self): - "return the number of solid boundaries being tracked" - return "unsigned int" - @PYB11const def haveSolidBoundary(boundary = "const SolidBoundaryBase<%(Dimension)s>&"): "is this boundary being tracked?" return "bool" - - @PYB11const - def getSolidBoundaryUniqueIndex(x="const int"): - "Unique index for neighborIndices pairFieldList (returns -x-1)" - return "int" + + def removeSolidBoundary(boundary = "const SolidBoundaryBase<%(Dimension)s>&"): + "remove the specified solid boundary" + return "void" #........................................................................... 
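DEMBase now handles solid-boundary bookkeeping through removeSolidBoundary plus the numSolidBoundaries and newSolidBoundaryIndex properties added in the Properties block below, replacing the removed numSolidBoundaries/getSolidBoundaryUniqueIndex methods. A hedged sketch of how that reads from Python; construction of the dem package and the boundary bc is assumed and not shown here:

    # Sketch only: 'dem' is a DEMBase-derived package, 'bc' a SolidBoundaryBase instance
    if dem.haveSolidBoundary(bc):
        dem.removeSolidBoundary(bc)
    print("boundaries tracked:", dem.numSolidBoundaries,
          "most recently added index:", dem.newSolidBoundaryIndex)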
# Properties @@ -145,6 +138,8 @@ def getSolidBoundaryUniqueIndex(x="const int"): DDtShearDisplacement = PYB11property("const FieldList<%(Dimension)s, vector>&","DDtShearDisplacement", returnpolicy="reference_internal") isActiveContact = PYB11property("const FieldList<%(Dimension)s, vector>&","isActiveContact", returnpolicy="reference_internal") + newSolidBoundaryIndex = PYB11property("int", "newSolidBoundaryIndex", doc="index of the most recent solid bc added to the package") + numSolidBoundaries = PYB11property("unsigned int", "numSolidBoundaries", doc="number of solid boundaries") numContacts = PYB11property("unsigned int", "numContacts", doc="Total number of contacts") numParticleParticleContacts = PYB11property("unsigned int", "numParticleParticleContacts", doc="Number of interactions with other dem particles") numParticleBoundaryContacts = PYB11property("unsigned int", "numParticleBoundaryContacts", doc="Number interactions with solid boundaries") diff --git a/src/PYB11/DEM/DEM_PYB11.py b/src/PYB11/DEM/DEM_PYB11.py index 7983d462f..fe7923454 100644 --- a/src/PYB11/DEM/DEM_PYB11.py +++ b/src/PYB11/DEM/DEM_PYB11.py @@ -26,6 +26,7 @@ '"DEM/SolidBoundary/CircularPlaneSolidBoundary.hh"', '"DEM/SolidBoundary/CylinderSolidBoundary.hh"', '"DEM/SolidBoundary/SphereSolidBoundary.hh"', + '"DEM/SolidBoundary/ClippedSphereSolidBoundary.hh"', '"FileIO/FileIO.hh"'] #------------------------------------------------------------------------------- @@ -46,6 +47,7 @@ CircularPlaneSolidBoundary%(ndim)id = PYB11TemplateClass(CircularPlaneSolidBoundary, template_parameters="%(Dimension)s") CylinderSolidBoundary%(ndim)id = PYB11TemplateClass(CylinderSolidBoundary, template_parameters="%(Dimension)s") SphereSolidBoundary%(ndim)id = PYB11TemplateClass(SphereSolidBoundary, template_parameters="%(Dimension)s") +ClippedSphereSolidBoundary%(ndim)id = PYB11TemplateClass(ClippedSphereSolidBoundary, template_parameters="%(Dimension)s") ''' % {"ndim" : ndim, "Dimension" : "Dim<" + str(ndim) + ">"}) diff --git a/src/PYB11/DEM/LinearSpringDEM.py b/src/PYB11/DEM/LinearSpringDEM.py index 7500c626c..1eb288cb6 100644 --- a/src/PYB11/DEM/LinearSpringDEM.py +++ b/src/PYB11/DEM/LinearSpringDEM.py @@ -26,6 +26,7 @@ def pyinit(dataBase = "const DataBase<%(Dimension)s>&", cohesiveTensileStrength = "const Scalar", shapeFactor = "const Scalar", stepsPerCollision = "const Scalar", + enableFastTimeStepping = "const bool", xmin = "const Vector&", xmax = "const Vector&"): "DEMBase constructor" @@ -68,7 +69,8 @@ def momentOfInertia(massi = "const Scalar", def setMomentOfInertia(self): return "void" - + + enableFastTimeStepping = PYB11property("bool", "enableFastTimeStepping", "enableFastTimeStepping", doc="activate fast time stepping") normalSpringConstant = PYB11property("Scalar", "normalSpringConstant", "normalSpringConstant", doc="normal spring constant") normalRestitutionCoefficient = PYB11property("Scalar", "normalRestitutionCoefficient", "normalRestitutionCoefficient", doc="normal restitution coefficient") tangentialSpringConstant = PYB11property("Scalar", "tangentialSpringConstant", "tangentialSpringConstant", doc="tangential spring constant") @@ -83,5 +85,6 @@ def setMomentOfInertia(self): shapeFactor = PYB11property("Scalar", "shapeFactor", "shapeFactor", doc="shape factor - simple approach to non-spherical particles") normalBeta = PYB11property("Scalar", "normalBeta", "normalBeta", doc="a damping parameter") tangentialBeta = PYB11property("Scalar", "tangentialBeta", "tangentialBeta", doc="a damping parameter") + 
collisionDuration = PYB11property("Scalar", "collisionDuration", "collisionDuration", doc="duration of a contact") momentOfInertia = PYB11property("const FieldList<%(Dimension)s, Scalar>&","momentOfInertia", returnpolicy="reference_internal") \ No newline at end of file diff --git a/src/PYB11/DEM/SolidBoundaries.py b/src/PYB11/DEM/SolidBoundaries.py index 8923e5d28..65ae21fe8 100644 --- a/src/PYB11/DEM/SolidBoundaries.py +++ b/src/PYB11/DEM/SolidBoundaries.py @@ -12,9 +12,11 @@ class SolidBoundaryBase: typedef typename %(Dimension)s::Scalar Scalar; typedef typename %(Dimension)s::Vector Vector; """ - def pyinit(): + def pyinit(self): "constructor for base class DEM solid boundary conditions" + uniqueIndex = PYB11property("int", "uniqueIndex", "uniqueIndex", doc="unique index for solid boundary") + PYB11inject(SolidBoundaryBaseAbstractMethods, SolidBoundaryBase, pure_virtual=True) #------------------------------------------------------------------------------- @@ -29,10 +31,18 @@ class InfinitePlaneSolidBoundary(SolidBoundaryBase): typedef typename %(Dimension)s::Vector Vector; """ - def pyinit(point = "const Vector&", + def pyinit(self, + point = "const Vector&", normal = "const Vector&"): "solid planar boundary" + @PYB11virtual + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." + return "void" + @PYB11virtual def update(self, multiplier = "const double", @@ -43,7 +53,7 @@ def update(self, @PYB11virtual @PYB11const - def velocity(self, + def localVelocity(self, position = "const Vector&"): "velocity of bc." return "Vector" @@ -72,11 +82,19 @@ class RectangularPlaneSolidBoundary(SolidBoundaryBase): typedef typename %(Dimension)s::Tensor Tensor; """ - def pyinit(point = "const Vector&", + def pyinit(self, + point = "const Vector&", extent = "const Vector&", basis = "const Tensor&"): "solid planar boundary" + @PYB11virtual + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." + return "void" + @PYB11virtual def update(self, multiplier = "const double", @@ -87,7 +105,7 @@ def update(self, @PYB11virtual @PYB11const - def velocity(self, + def localVelocity(self, position = "const Vector&"): "velocity of bc." return "Vector" @@ -116,11 +134,19 @@ class CircularPlaneSolidBoundary(SolidBoundaryBase): typedef typename %(Dimension)s::Vector Vector; """ - def pyinit(point = "const Vector&", + def pyinit(self, + point = "const Vector&", normal = "const Vector&", extent = "const Scalar"): "solid planar boundary" + @PYB11virtual + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." + return "void" + @PYB11virtual def update(self, multiplier = "const double", @@ -131,7 +157,7 @@ def update(self, @PYB11virtual @PYB11const - def velocity(self, + def localVelocity(self, position = "const Vector&"): "velocity of bc." 
return "Vector" @@ -160,12 +186,20 @@ class CylinderSolidBoundary(SolidBoundaryBase): typedef typename %(Dimension)s::Vector Vector; """ - def pyinit(point = "const Vector&", + def pyinit(self, + point = "const Vector&", axis = "const Vector&", radius = "const Scalar", length = "const Scalar"): "solid planar boundary" + @PYB11virtual + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." + return "void" + @PYB11virtual def update(self, multiplier = "const double", @@ -176,7 +210,7 @@ def update(self, @PYB11virtual @PYB11const - def velocity(self, + def localVelocity(self, position = "const Vector&"): "velocity of bc." return "Vector" @@ -196,23 +230,84 @@ def distance(self, #------------------------------------------------------------------------------- -# Cylinder solid boundary. In 2d this would be two planes. +# Sphere solid boundary. In 2d this would be a circle. #------------------------------------------------------------------------------- @PYB11template("Dimension") @PYB11module("SpheralDEM") class SphereSolidBoundary(SolidBoundaryBase): + PYB11typedefs = """ + typedef typename %(Dimension)s::Scalar Scalar; + typedef typename %(Dimension)s::Vector Vector; + typedef typename DEMDimension<%(Dimension)s>::AngularVector RotationType; + """ + + def pyinit(self, + center = "const Vector&", + radius = "const Scalar", + angularVelocity = "const RotationType&"): + "solid planar boundary" + + @PYB11virtual + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." + return "void" + + @PYB11virtual + def update(self, + multiplier = "const double", + t = "const double", + dt = "const double",): + "distance vector to bc." + return "void" + + @PYB11virtual + @PYB11const + def localVelocity(self, + position = "const Vector&"): + "velocity of bc." + return "Vector" + + @PYB11virtual + @PYB11const + def distance(self, + position = "const Vector&"): + "distance vector to bc." + return "Vector" + + velocity = PYB11property("const Vector&", "velocity", "velocity", returnpolicy="reference_internal", doc="velocity of plane") + center = PYB11property("const Vector&", "center", "center", returnpolicy="reference_internal", doc="center of sphere") + radius = PYB11property("Scalar", "radius", "radius", doc="radius of sphere") + angularVelocity = PYB11property("const RotationType&", "angularVelocity", "angularVelocity", doc="rotation about center point") + +#------------------------------------------------------------------------------- +# Sphere solid boundary intersected with an infinite plane. +#------------------------------------------------------------------------------- +@PYB11template("Dimension") +@PYB11module("SpheralDEM") +class ClippedSphereSolidBoundary(SolidBoundaryBase): + PYB11typedefs = """ typedef typename %(Dimension)s::Scalar Scalar; typedef typename %(Dimension)s::Vector Vector; """ - def pyinit(center = "const Vector&", + def pyinit(self, + center = "const Vector&", radius = "const Scalar", clipPoint = "const Vector&", clipAxis = "const Vector&"): "solid planar boundary" + @PYB11virtual + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." 
+ return "void" + @PYB11virtual def update(self, multiplier = "const double", @@ -223,7 +318,7 @@ def update(self, @PYB11virtual @PYB11const - def velocity(self, + def localVelocity(self, position = "const Vector&"): "velocity of bc." return "Vector" @@ -239,4 +334,11 @@ def distance(self, center = PYB11property("const Vector&", "center", "center", returnpolicy="reference_internal", doc="center of sphere") radius = PYB11property("Scalar", "radius", "radius", doc="radius of sphere") clipPoint = PYB11property("const Vector&", "clipPoint", "clipPoint", returnpolicy="reference_internal", doc="point on clip plane") - clipAxis = PYB11property("const Vector&", "clipAxis", "clipAxis", returnpolicy="reference_internal", doc="normal in clip plane") \ No newline at end of file + clipAxis = PYB11property("const Vector&", "clipAxis", "clipAxis", returnpolicy="reference_internal", doc="normal in clip plane") + + +#PYB11inject(SolidBoundaryBaseAbstractMethods, SphereSolidBoundary, virtual=True, pure_virtual=False) +#PYB11inject(SolidBoundaryBaseAbstractMethods, InfinitePlaneSolidBoundary, virtual=True, pure_virtual=False) +#PYB11inject(SolidBoundaryBaseAbstractMethods, RectangularPlaneSolidBoundary, virtual=True, pure_virtual=False) +#PYB11inject(SolidBoundaryBaseAbstractMethods, CircularPlaneSolidBoundary, virtual=True, pure_virtual=False) +#PYB11inject(SolidBoundaryBaseAbstractMethods, CylinderSolidBoundary, virtual=True, pure_virtual=False) diff --git a/src/PYB11/DEM/SolidBoundaryBaseAbstractMethods.py b/src/PYB11/DEM/SolidBoundaryBaseAbstractMethods.py index d370930de..584c314fb 100644 --- a/src/PYB11/DEM/SolidBoundaryBaseAbstractMethods.py +++ b/src/PYB11/DEM/SolidBoundaryBaseAbstractMethods.py @@ -7,7 +7,7 @@ class SolidBoundaryBaseAbstractMethods: @PYB11const - def velocity(self, + def localVelocity(self, position = "const Vector&"): "velocity of bc." return "Vector" @@ -18,6 +18,12 @@ def distance(self, "distance vector to bc." return "Vector" + def registerState(self, + dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state solid bc expects to use and evolve." 
+ return "void" + def update(self, multiplier = "const double", t = "const double", diff --git a/src/PYB11/Damage/CMakeLists.txt b/src/PYB11/Damage/CMakeLists.txt index e0bcf9aca..fd99f8525 100644 --- a/src/PYB11/Damage/CMakeLists.txt +++ b/src/PYB11/Damage/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Damage) +spheral_add_pybind11_library(Damage SPHERAL_MODULE_LIST) diff --git a/src/PYB11/DataBase/CMakeLists.txt b/src/PYB11/DataBase/CMakeLists.txt index 282fae011..cbf20d21c 100644 --- a/src/PYB11/DataBase/CMakeLists.txt +++ b/src/PYB11/DataBase/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(DataBase) +spheral_add_pybind11_library(DataBase SPHERAL_MODULE_LIST) diff --git a/src/PYB11/DataOutput/CMakeLists.txt b/src/PYB11/DataOutput/CMakeLists.txt index 2fc511f0d..46f27e5dd 100644 --- a/src/PYB11/DataOutput/CMakeLists.txt +++ b/src/PYB11/DataOutput/CMakeLists.txt @@ -1,3 +1,3 @@ -spheral_add_pybind11_library(DataOutput +spheral_add_pybind11_library(DataOutput SPHERAL_MODULE_LIST SOURCES RestartableObject.cc - INCLUDES ${SPHERAL_ROOT_DIR}/src/Pybind11Wraps/DataOutput) + INCLUDES ${SPHERAL_ROOT_DIR}/src/Pybind11Wraps/DataOutput SPHERAL_MODULE_LIST) diff --git a/src/PYB11/DeviceTestLib/CMakeLists.txt b/src/PYB11/DeviceTestLib/CMakeLists.txt index 34cbcefba..d13d8afcd 100644 --- a/src/PYB11/DeviceTestLib/CMakeLists.txt +++ b/src/PYB11/DeviceTestLib/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(DeviceTestLib) +spheral_add_pybind11_library(DeviceTestLib SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Distributed/CMakeLists.txt b/src/PYB11/Distributed/CMakeLists.txt index e06115c23..3379683c1 100644 --- a/src/PYB11/Distributed/CMakeLists.txt +++ b/src/PYB11/Distributed/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Distributed) +spheral_add_pybind11_library(Distributed SPHERAL_MODULE_LIST) diff --git a/src/PYB11/ExternalForce/CMakeLists.txt b/src/PYB11/ExternalForce/CMakeLists.txt index d5d94ec18..ff6afcda9 100644 --- a/src/PYB11/ExternalForce/CMakeLists.txt +++ b/src/PYB11/ExternalForce/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(ExternalForce) +spheral_add_pybind11_library(ExternalForce SPHERAL_MODULE_LIST) diff --git a/src/PYB11/FSISPH/CMakeLists.txt b/src/PYB11/FSISPH/CMakeLists.txt index 7def85deb..87856b9f0 100644 --- a/src/PYB11/FSISPH/CMakeLists.txt +++ b/src/PYB11/FSISPH/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(FSISPH) +spheral_add_pybind11_library(FSISPH SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Field/CMakeLists.txt b/src/PYB11/Field/CMakeLists.txt index cef6c4d1b..cd57e205e 100644 --- a/src/PYB11/Field/CMakeLists.txt +++ b/src/PYB11/Field/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Field) +spheral_add_pybind11_library(Field SPHERAL_MODULE_LIST) diff --git a/src/PYB11/FieldList/ArithmeticFieldList.py b/src/PYB11/FieldList/ArithmeticFieldList.py index 06a07b16d..865a6022e 100644 --- a/src/PYB11/FieldList/ArithmeticFieldList.py +++ b/src/PYB11/FieldList/ArithmeticFieldList.py @@ -1,12 +1,13 @@ from PYB11Generator import * -from FieldList import * +import FieldList +import FieldListBase #------------------------------------------------------------------------------- # FieldList with numeric operations #------------------------------------------------------------------------------- @PYB11template("Dimension", "Value") @PYB11pycppname("FieldList") -class ArithmeticFieldList(FieldListBase): +class ArithmeticFieldList(FieldListBase.FieldListBase): PYB11typedefs = """ typedef FieldList<%(Dimension)s, %(Value)s> 
FieldListType; @@ -138,4 +139,4 @@ def localMax(self): #------------------------------------------------------------------------------- # Inject FieldList #------------------------------------------------------------------------------- -PYB11inject(FieldList, ArithmeticFieldList) +PYB11inject(FieldList.FieldList, ArithmeticFieldList) diff --git a/src/PYB11/FieldList/ArithmeticFieldList_PYB11.py b/src/PYB11/FieldList/ArithmeticFieldList_PYB11.py new file mode 100644 index 000000000..fb6b7e4f9 --- /dev/null +++ b/src/PYB11/FieldList/ArithmeticFieldList_PYB11.py @@ -0,0 +1,62 @@ +""" +Spheral ArithmeticFieldList module. + +Provides the ArithmeticFieldList classes. +""" + +from PYB11Generator import * +from SpheralCommon import * +from spheralDimensions import * +dims = spheralDimensions() + +from ArithmeticFieldList import * +from MinMaxFieldList import * + +#------------------------------------------------------------------------------- +# Includes +#------------------------------------------------------------------------------- +PYB11includes += ['"Geometry/Dimension.hh"', + '"Field/FieldBase.hh"', + '"Field/Field.hh"', + '"Field/FieldList.hh"', + '"Field/FieldListSet.hh"', + '"Utilities/FieldDataTypeTraits.hh"', + '"Utilities/DomainNode.hh"', + '"Geometry/CellFaceFlag.hh"', + ''] + +#------------------------------------------------------------------------------- +# Namespaces +#------------------------------------------------------------------------------- +PYB11namespaces = ["Spheral"] + +#------------------------------------------------------------------------------- +# Do our dimension dependent instantiations. +#------------------------------------------------------------------------------- +for ndim in dims: + + #........................................................................... + # arithmetic fields + for (value, label) in (("int", "Int"), + ("unsigned", "Unsigned"), + ("uint64_t", "ULL"), + ("Dim<%i>::Vector" % ndim, "Vector"), + ("Dim<%i>::Tensor" % ndim, "Tensor"), + ("Dim<%i>::ThirdRankTensor" % ndim, "ThirdRankTensor"), + ("Dim<%i>::FourthRankTensor" % ndim, "FourthRankTensor"), + ("Dim<%i>::FifthRankTensor" % ndim, "FifthRankTensor")): + exec(''' +%(label)sFieldList%(ndim)sd = PYB11TemplateClass(ArithmeticFieldList, template_parameters=("Dim<%(ndim)i>", "%(value)s")) +''' % {"ndim" : ndim, + "value" : value, + "label" : label}) + + #........................................................................... 
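Each pass through the exec loop above stamps out one concrete FieldList binding in the new ArithmeticFieldList module; shown only to make the generated names concrete, the ndim = 3, Vector case expands to:

    VectorFieldList3d = PYB11TemplateClass(ArithmeticFieldList,
                                           template_parameters = ("Dim<3>", "Dim<3>::Vector"))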
+ # A few fields can apply the min/max with a scalar additionally + for (value, label) in (("double", "Scalar"), + ("Dim<%i>::SymTensor" % ndim, "SymTensor")): + exec(''' +%(label)sFieldList%(ndim)sd = PYB11TemplateClass(MinMaxFieldList, template_parameters=("Dim<%(ndim)i>", "%(value)s")) +''' % {"ndim" : ndim, + "value" : value, + "label" : label}) diff --git a/src/PYB11/FieldList/CMakeLists.txt b/src/PYB11/FieldList/CMakeLists.txt index fcaf75a06..31d0d6bf9 100644 --- a/src/PYB11/FieldList/CMakeLists.txt +++ b/src/PYB11/FieldList/CMakeLists.txt @@ -1 +1,2 @@ -spheral_add_pybind11_library(FieldList) +spheral_add_pybind11_library(FieldList SPHERAL_MODULE_LIST) +spheral_add_pybind11_library(ArithmeticFieldList SPHERAL_MODULE_LIST) diff --git a/src/PYB11/FieldList/FieldList_PYB11.py b/src/PYB11/FieldList/FieldList_PYB11.py index 44b93de36..919977646 100644 --- a/src/PYB11/FieldList/FieldList_PYB11.py +++ b/src/PYB11/FieldList/FieldList_PYB11.py @@ -11,8 +11,6 @@ from FieldListBase import * from FieldList import * -from ArithmeticFieldList import * -from MinMaxFieldList import * from FieldListSet import * #------------------------------------------------------------------------------- @@ -58,32 +56,6 @@ ("RKCoefficients>" % ndim, "RKCoefficients")): exec(''' %(label)sFieldList%(ndim)sd = PYB11TemplateClass(FieldList, template_parameters=("Dim<%(ndim)i>", "%(value)s")) -''' % {"ndim" : ndim, - "value" : value, - "label" : label}) - - #........................................................................... - # arithmetic fields - for (value, label) in (("int", "Int"), - ("unsigned", "Unsigned"), - ("uint64_t", "ULL"), - ("Dim<%i>::Vector" % ndim, "Vector"), - ("Dim<%i>::Tensor" % ndim, "Tensor"), - ("Dim<%i>::ThirdRankTensor" % ndim, "ThirdRankTensor"), - ("Dim<%i>::FourthRankTensor" % ndim, "FourthRankTensor"), - ("Dim<%i>::FifthRankTensor" % ndim, "FifthRankTensor")): - exec(''' -%(label)sFieldList%(ndim)sd = PYB11TemplateClass(ArithmeticFieldList, template_parameters=("Dim<%(ndim)i>", "%(value)s")) -''' % {"ndim" : ndim, - "value" : value, - "label" : label}) - - #........................................................................... 
- # A few fields can apply the min/max with a scalar addtionally - for (value, label) in (("double", "Scalar"), - ("Dim<%i>::SymTensor" % ndim, "SymTensor")): - exec(''' -%(label)sFieldList%(ndim)sd = PYB11TemplateClass(MinMaxFieldList, template_parameters=("Dim<%(ndim)i>", "%(value)s")) ''' % {"ndim" : ndim, "value" : value, "label" : label}) diff --git a/src/PYB11/FieldList/MinMaxFieldList.py b/src/PYB11/FieldList/MinMaxFieldList.py index ba7f3b81b..e07d6cba5 100644 --- a/src/PYB11/FieldList/MinMaxFieldList.py +++ b/src/PYB11/FieldList/MinMaxFieldList.py @@ -1,4 +1,6 @@ from PYB11Generator import * +import FieldList +import FieldListBase from ArithmeticFieldList import * #------------------------------------------------------------------------------- @@ -6,7 +8,7 @@ #------------------------------------------------------------------------------- @PYB11template("Dimension", "Value") @PYB11pycppname("FieldList") -class MinMaxFieldList(FieldListBase): +class MinMaxFieldList(FieldListBase.FieldListBase): PYB11typedefs = """ typedef FieldList<%(Dimension)s, %(Value)s> FieldListType; diff --git a/src/PYB11/FieldOperations/CMakeLists.txt b/src/PYB11/FieldOperations/CMakeLists.txt index 382841c08..88a97063e 100644 --- a/src/PYB11/FieldOperations/CMakeLists.txt +++ b/src/PYB11/FieldOperations/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(FieldOperations) +spheral_add_pybind11_library(FieldOperations SPHERAL_MODULE_LIST) diff --git a/src/PYB11/FileIO/CMakeLists.txt b/src/PYB11/FileIO/CMakeLists.txt index ebf301b6b..0bc255063 100644 --- a/src/PYB11/FileIO/CMakeLists.txt +++ b/src/PYB11/FileIO/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(FileIO) +spheral_add_pybind11_library(FileIO SPHERAL_MODULE_LIST) diff --git a/src/PYB11/GSPH/CMakeLists.txt b/src/PYB11/GSPH/CMakeLists.txt index 6cefb5c49..1d5145dfe 100644 --- a/src/PYB11/GSPH/CMakeLists.txt +++ b/src/PYB11/GSPH/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(GSPH) +spheral_add_pybind11_library(GSPH SPHERAL_MODULE_LIST) diff --git a/src/PYB11/GSPH/GSPHHydroBase.py b/src/PYB11/GSPH/GSPHHydroBase.py index 49c33231e..a8e8c2806 100644 --- a/src/PYB11/GSPH/GSPHHydroBase.py +++ b/src/PYB11/GSPH/GSPHHydroBase.py @@ -7,6 +7,7 @@ @PYB11template("Dimension") @PYB11module("SpheralGSPH") +@PYB11dynamic_attr class GSPHHydroBase(GenericRiemannHydro): PYB11typedefs = """ diff --git a/src/PYB11/GSPH/GSPH_PYB11.py b/src/PYB11/GSPH/GSPH_PYB11.py index c465528a4..f753cae9b 100644 --- a/src/PYB11/GSPH/GSPH_PYB11.py +++ b/src/PYB11/GSPH/GSPH_PYB11.py @@ -12,6 +12,7 @@ from GenericRiemannHydro import * from GSPHHydroBase import * from MFMHydroBase import * +from MFVHydroBase import * from WaveSpeeds import * from Limiters import * from RiemannSolvers import * @@ -22,6 +23,7 @@ PYB11includes += ['"GSPH/GenericRiemannHydro.hh"', '"GSPH/GSPHHydroBase.hh"', '"GSPH/MFMHydroBase.hh"', + '"GSPH/MFVHydroBase.hh"', '"GSPH/WaveSpeeds/WaveSpeedBase.hh"', '"GSPH/WaveSpeeds/AcousticWaveSpeed.hh"', '"GSPH/WaveSpeeds/DavisWaveSpeed.hh"', @@ -32,9 +34,10 @@ '"GSPH/Limiters/VanAlbaLimiter.hh"', '"GSPH/Limiters/SuperbeeLimiter.hh"', '"GSPH/Limiters/OspreLimiter.hh"', + '"GSPH/Limiters/BarthJespersenLimiter.hh"', '"GSPH/RiemannSolvers/RiemannSolverBase.hh"', '"GSPH/RiemannSolvers/HLLC.hh"', - '"GSPH/RiemannSolvers/GHLLC.hh"', + '"GSPH/RiemannSolvers/SecondOrderArtificialViscosity.hh"', '"FileIO/FileIO.hh"'] #------------------------------------------------------------------------------- @@ -49,7 +52,15 @@ "HydroAccelerationGradient", "SPHGradient", 
"MixedMethodGradient", - "SPHSameTimeGradient"), export_values = True) + "SPHSameTimeGradient", + "SPHUncorrectedGradient", + "NoGradient"), export_values = True) + +NodeMotionType = PYB11enum(("Lagrangian", + "Eulerian", + "Fician", + "XSPH", + "BackgroundPressure"), export_values = False) #------------------------------------------------------------------------------- # Instantiate our types @@ -59,6 +70,7 @@ GenericRiemannHydro%(ndim)id = PYB11TemplateClass(GenericRiemannHydro, template_parameters="%(Dimension)s") GSPHHydroBase%(ndim)id = PYB11TemplateClass(GSPHHydroBase, template_parameters="%(Dimension)s") MFMHydroBase%(ndim)id = PYB11TemplateClass(MFMHydroBase, template_parameters="%(Dimension)s") +MFVHydroBase%(ndim)id = PYB11TemplateClass(MFVHydroBase, template_parameters="%(Dimension)s") WaveSpeedBase%(ndim)id = PYB11TemplateClass(WaveSpeedBase, template_parameters="%(Dimension)s") AcousticWaveSpeed%(ndim)id = PYB11TemplateClass(AcousticWaveSpeed, template_parameters="%(Dimension)s") DavisWaveSpeed%(ndim)id = PYB11TemplateClass(DavisWaveSpeed, template_parameters="%(Dimension)s") @@ -69,9 +81,10 @@ VanAlbaLimiter%(ndim)id = PYB11TemplateClass(VanAlbaLimiter, template_parameters="%(Dimension)s") SuperbeeLimiter%(ndim)id = PYB11TemplateClass(SuperbeeLimiter, template_parameters="%(Dimension)s") OspreLimiter%(ndim)id = PYB11TemplateClass(OspreLimiter, template_parameters="%(Dimension)s") +BarthJespersenLimiter%(ndim)id = PYB11TemplateClass(BarthJespersenLimiter, template_parameters="%(Dimension)s") RiemannSolverBase%(ndim)id = PYB11TemplateClass(RiemannSolverBase, template_parameters="%(Dimension)s") HLLC%(ndim)id = PYB11TemplateClass(HLLC, template_parameters="%(Dimension)s") -GHLLC%(ndim)id = PYB11TemplateClass(GHLLC, template_parameters="%(Dimension)s") +SecondOrderArtificialViscosity%(ndim)id = PYB11TemplateClass(SecondOrderArtificialViscosity, template_parameters="%(Dimension)s") ''' % {"ndim" : ndim, "Dimension" : "Dim<" + str(ndim) + ">"}) diff --git a/src/PYB11/GSPH/GenericRiemannHydro.py b/src/PYB11/GSPH/GenericRiemannHydro.py index 6db20d7e5..57dcfd80d 100644 --- a/src/PYB11/GSPH/GenericRiemannHydro.py +++ b/src/PYB11/GSPH/GenericRiemannHydro.py @@ -7,6 +7,7 @@ @PYB11template("Dimension") @PYB11module("SpheralGSPH") +@PYB11dynamic_attr class GenericRiemannHydro(Physics): PYB11typedefs = """ @@ -146,7 +147,7 @@ def enforceBoundaries(state = "State<%(Dimension)s>&", cfl = PYB11property("Scalar", "cfl", "cfl", doc="The Courant-Friedrichs-Lewy timestep limit multiplier") specificThermalEnergyDiffusionCoefficient = PYB11property("Scalar", "specificThermalEnergyDiffusionCoefficient", "specificThermalEnergyDiffusionCoefficient", doc="coefficient used to diffuse specificThermalEnergy amongst like nodes.") - riemannSolver = PYB11property("RiemannSolverBase<%(Dimension)s>&", "riemannSolver",returnpolicy="reference_internal",doc="The object defining the interface state construction.") + riemannSolver = PYB11property("RiemannSolverBase<%(Dimension)s>&", "riemannSolver", doc="The object defining the interface state construction.") kernel = PYB11property("const TableKernel<%(Dimension)s>&", "kernel", doc="The interpolation kernel") gradientType = PYB11property("GradientType", "gradientType", "gradientType", doc="Enum to selecting different gradients we can use") @@ -178,6 +179,7 @@ def enforceBoundaries(state = "State<%(Dimension)s>&", timeStepMask = PYB11property("const FieldList<%(Dimension)s, int>&", "timeStepMask", returnpolicy="reference_internal") pressure = PYB11property("const 
FieldList<%(Dimension)s, Scalar>&", "pressure", returnpolicy="reference_internal") + volume = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "volume", returnpolicy="reference_internal") soundSpeed = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "soundSpeed", returnpolicy="reference_internal") Hideal = PYB11property("const FieldList<%(Dimension)s, SymTensor>&","Hideal", returnpolicy="reference_internal") normalization = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "normalization", returnpolicy="reference_internal") diff --git a/src/PYB11/GSPH/Limiters.py b/src/PYB11/GSPH/Limiters.py index d2cb8d647..b3e85d8f8 100644 --- a/src/PYB11/GSPH/Limiters.py +++ b/src/PYB11/GSPH/Limiters.py @@ -177,4 +177,32 @@ def fluxLimiter(self, "slope limiter from flux limiter." return "Scalar" +#------------------------------------------------------------------------------- +# Barth Jespersen limiter +#------------------------------------------------------------------------------- +@PYB11template("Dimension") +@PYB11module("SpheralGSPH") +class BarthJespersenLimiter(LimiterBase): + + PYB11typedefs = """ + typedef typename %(Dimension)s::Scalar Scalar; + """ + + def pyinit(): + "Barth Jespersen slope limiter constructor" + + @PYB11virtual + @PYB11const + def slopeLimiter(self, + x = "const Scalar"): + "slope limiter from flux limiter." + return "Scalar" + + @PYB11virtual + @PYB11const + def fluxLimiter(self, + x = "const Scalar"): + "slope limiter from flux limiter." + return "Scalar" + diff --git a/src/PYB11/GSPH/MFMHydroBase.py b/src/PYB11/GSPH/MFMHydroBase.py index b713433ee..50e413746 100644 --- a/src/PYB11/GSPH/MFMHydroBase.py +++ b/src/PYB11/GSPH/MFMHydroBase.py @@ -7,6 +7,7 @@ @PYB11template("Dimension") @PYB11module("SpheralGSPH") +@PYB11dynamic_attr class MFMHydroBase(GenericRiemannHydro): PYB11typedefs = """ diff --git a/src/PYB11/GSPH/MFVHydroBase.py b/src/PYB11/GSPH/MFVHydroBase.py new file mode 100644 index 000000000..61b857f06 --- /dev/null +++ b/src/PYB11/GSPH/MFVHydroBase.py @@ -0,0 +1,120 @@ +#------------------------------------------------------------------------------- +# GSPHHydroBase +#------------------------------------------------------------------------------- +from PYB11Generator import * +from GenericRiemannHydro import * +from RestartMethods import * + +@PYB11template("Dimension") +@PYB11module("SpheralGSPH") +@PYB11dynamic_attr +class MFVHydroBase(GenericRiemannHydro): + + PYB11typedefs = """ + typedef typename %(Dimension)s::Scalar Scalar; + typedef typename %(Dimension)s::Vector Vector; + typedef typename %(Dimension)s::Tensor Tensor; + typedef typename %(Dimension)s::SymTensor SymTensor; + typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType; +""" + + def pyinit(smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&", + dataBase = "DataBase<%(Dimension)s>&", + riemannSolver = "RiemannSolverBase<%(Dimension)s>&", + W = "const TableKernel<%(Dimension)s>&", + epsDiffusionCoeff = "const Scalar", + cfl = "const double", + useVelocityMagnitudeForDt = "const bool", + compatibleEnergyEvolution = "const bool", + evolveTotalEnergy = "const bool", + XSPH = "const bool", + correctVelocityGradient = "const bool", + nodeMotionCoefficient = "const double", + nodeMotionType = "const NodeMotionType", + gradType = "const GradientType", + densityUpdate = "const MassDensityType", + HUpdate = "const HEvolutionType", + epsTensile = "const double", + nTensile = "const double", + xmin = "const Vector&", + xmax = "const Vector&"): + "GSPHHydroBase 
constructor" + + #........................................................................... + # Virtual methods + + @PYB11virtual + def initializeProblemStartup(dataBase = "DataBase<%(Dimension)s>&"): + "Tasks we do once on problem startup." + return "void" + + @PYB11virtual + def registerState(dataBase = "DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&"): + "Register the state Hydro expects to use and evolve." + return "void" + + @PYB11virtual + def registerDerivatives(dataBase = "DataBase<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + "Register the derivatives/change fields for updating state." + return "void" + + @PYB11virtual + def preStepInitialize(self, + dataBase = "const DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + "Optional hook to be called at the beginning of a time step." + return "void" + + @PYB11virtual + def initialize(time = "const Scalar", + dt = "const Scalar", + dataBase = "const DataBase<%(Dimension)s>&", + state = "State<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + "Initialize the Hydro before we start a derivative evaluation." + return "void" + + @PYB11virtual + @PYB11const + def evaluateDerivatives(time = "const Scalar", + dt = "const Scalar", + dataBase = "const DataBase<%(Dimension)s>&", + state = "const State<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + """Evaluate the derivatives for the principle hydro +mass density, velocity, and specific thermal energy.""" + return "void" + + @PYB11virtual + @PYB11const + def finalizeDerivatives(time = "const Scalar", + dt = "const Scalar", + dataBase = "const DataBase<%(Dimension)s>&", + state = "const State<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + "Finalize the derivatives." + return "void" + + @PYB11virtual + def applyGhostBoundaries(state = "State<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + "Apply boundary conditions to the physics specific fields." + return "void" + + @PYB11virtual + def enforceBoundaries(state = "State<%(Dimension)s>&", + derivs = "StateDerivatives<%(Dimension)s>&"): + "Enforce boundary conditions for the physics specific fields." 
+ return "void" + + DvolumeDt = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "DvolumeDt", returnpolicy="reference_internal") + nodeMotionCoefficient = PYB11property("double", "nodeMotionCoefficient", "nodeMotionCoefficient",doc="multiplier for XSPH and Fician node motion schemes.") + nodeMotionType = PYB11property("NodeMotionType","nodeMotionType","nodeMotionType") + +#------------------------------------------------------------------------------- +# Inject methods +#------------------------------------------------------------------------------- +PYB11inject(RestartMethods, MFVHydroBase) diff --git a/src/PYB11/GSPH/RiemannSolvers.py b/src/PYB11/GSPH/RiemannSolvers.py index 3c1c4a1db..1f3206327 100644 --- a/src/PYB11/GSPH/RiemannSolvers.py +++ b/src/PYB11/GSPH/RiemannSolvers.py @@ -25,11 +25,6 @@ def pyinit(slopeLimiter = "LimiterBase<%(Dimension)s>&", waveSpeed = PYB11property("WaveSpeedBase<%(Dimension)s>&", "waveSpeed",returnpolicy="reference_internal", doc="wave speed object") limiter = PYB11property("LimiterBase<%(Dimension)s>&", "limiter",returnpolicy="reference_internal", doc="slope limiter object") - #DpDx = PYB11property("const FieldList<%(Dimension)s, Vector>&", "DpDx",returnpolicy="reference_internal") - #DvDx = PYB11property("const FieldList<%(Dimension)s, Tensor>&", "DvDx",returnpolicy="reference_internal") - -#PYB11inject(RiemannSolverBaseAbstractMethods, RiemannSolverBase, pure_virtual=True) - #------------------------------------------------------------------------------- # HLLC Approximate Riemann Solver #------------------------------------------------------------------------------- @@ -47,10 +42,11 @@ def pyinit(slopeLimiter = "LimiterBase<%(Dimension)s>&", linearReconstruction = "const bool"): "slope limiter constructor" + #------------------------------------------------------------------------------- -# HLLC Approximate Riemann Solver with constant grav acceleration +# HLLC Approximate Riemann Solver #------------------------------------------------------------------------------- -class GHLLC(HLLC): +class SecondOrderArtificialViscosity(RiemannSolverBase): PYB11typedefs = """ typedef typename %(Dimension)s::Scalar Scalar; @@ -59,11 +55,12 @@ class GHLLC(HLLC): typedef typename %(Dimension)s::SymTensor SymTensor; """ - def pyinit(slopeLimiter = "LimiterBase<%(Dimension)s>&", + def pyinit(Cl = "const Scalar", + Cq = "const Scalar", + slopeLimiter = "LimiterBase<%(Dimension)s>&", waveSpeed = "WaveSpeedBase<%(Dimension)s>&", - linearReconstruction = "const bool", - gravitationalAcceleration = "const Vector"): + linearReconstruction = "const bool"): "slope limiter constructor" - - gravitationalAcceleration = PYB11property("Vector", "gravitationalAcceleration", "gravitationalAcceleration", doc="constant gravitational acceleration vector") + Cl = PYB11property("Scalar", "Cl", "Cl", doc="linear artificial viscosity coefficient") + Cq = PYB11property("Scalar", "Cq", "Cq", doc="quadratic artificial viscosity coefficient") diff --git a/src/PYB11/Geometry/CMakeLists.txt b/src/PYB11/Geometry/CMakeLists.txt index dc4597126..c7897781f 100644 --- a/src/PYB11/Geometry/CMakeLists.txt +++ b/src/PYB11/Geometry/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Geometry) +spheral_add_pybind11_library(Geometry SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Gravity/CMakeLists.txt b/src/PYB11/Gravity/CMakeLists.txt index 8a2771873..f5ebde927 100644 --- a/src/PYB11/Gravity/CMakeLists.txt +++ b/src/PYB11/Gravity/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Gravity) 
+spheral_add_pybind11_library(Gravity SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Helmholtz/CMakeLists.txt b/src/PYB11/Helmholtz/CMakeLists.txt index 7728c4db7..282432baa 100644 --- a/src/PYB11/Helmholtz/CMakeLists.txt +++ b/src/PYB11/Helmholtz/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Helmholtz) +spheral_add_pybind11_library(Helmholtz SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Hydro/CMakeLists.txt b/src/PYB11/Hydro/CMakeLists.txt index 02fa9f751..8184a0f4a 100644 --- a/src/PYB11/Hydro/CMakeLists.txt +++ b/src/PYB11/Hydro/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Hydro) +spheral_add_pybind11_library(Hydro SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Integrator/CMakeLists.txt b/src/PYB11/Integrator/CMakeLists.txt index 7849fe7c4..ece5355cc 100644 --- a/src/PYB11/Integrator/CMakeLists.txt +++ b/src/PYB11/Integrator/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Integrator) +spheral_add_pybind11_library(Integrator SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Kernel/CMakeLists.txt b/src/PYB11/Kernel/CMakeLists.txt index d6594a020..9bb940469 100644 --- a/src/PYB11/Kernel/CMakeLists.txt +++ b/src/PYB11/Kernel/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Kernel) +spheral_add_pybind11_library(Kernel SPHERAL_MODULE_LIST) diff --git a/src/PYB11/KernelIntegrator/CMakeLists.txt b/src/PYB11/KernelIntegrator/CMakeLists.txt index ccdc9b032..f1c3df1c4 100644 --- a/src/PYB11/KernelIntegrator/CMakeLists.txt +++ b/src/PYB11/KernelIntegrator/CMakeLists.txt @@ -1 +1,2 @@ -spheral_add_pybind11_library(KernelIntegrator) +spheral_add_pybind11_library(KernelIntegrator SPHERAL_MODULE_LIST) +spheral_add_pybind11_library(IntegrationKernels SPHERAL_MODULE_LIST) diff --git a/src/PYB11/KernelIntegrator/IntegrationCoefficient.py b/src/PYB11/KernelIntegrator/IntegrationCoefficient.py index 39ab6a73b..505e4d251 100644 --- a/src/PYB11/KernelIntegrator/IntegrationCoefficient.py +++ b/src/PYB11/KernelIntegrator/IntegrationCoefficient.py @@ -60,24 +60,4 @@ def evaluateCoefficient(self, kid = "const KernelIntegrationData<%(Dimension)s>&"): "Return a value for the coefficient" return "%(CoefficientType)s" - -@PYB11template("Dimension", "CoefficientType") -@PYB11holder("std::shared_ptr") -class IntegralDependsOnCoefficient: - def pyinit(self): - "Choose coefficient for integral (coefficient defaults to one)" - - coefficient = PYB11property(doc="The coefficient", - getter = "getCoefficient", - setter = "setCoefficient") -@PYB11template("Dimension", "CoefficientType") -@PYB11holder("std::shared_ptr") -class IntegralDependsOnFieldListCoefficient: - def pyinit(self): - "Choose coefficient for integral" - - coefficient = PYB11property(doc="The coefficient", - getter = "getCoefficient", - setter = "setCoefficient") - diff --git a/src/PYB11/KernelIntegrator/IntegrationKernels_PYB11.py b/src/PYB11/KernelIntegrator/IntegrationKernels_PYB11.py new file mode 100644 index 000000000..85f7fcaa1 --- /dev/null +++ b/src/PYB11/KernelIntegrator/IntegrationKernels_PYB11.py @@ -0,0 +1,90 @@ +""" +Spheral IntegrationKernels module +""" + +from PYB11Generator import * +from SpheralCommon import * +from spheralDimensions import * +dims = spheralDimensions() + +from BilinearIndex import * +from FlatConnectivity import * +from KernelIntegrationData import * +from IntegrationCoefficient import * +from IntegrationKernel import * +from KernelIntegrator import * +from ManufacturedSolution import * +from RKIntegrationKernel import * +from SPHIntegrationKernel import * + 
+#-------------------------------------------------------------------------------
+# Includes
+#-------------------------------------------------------------------------------
+PYB11includes += ['',
+                  '',
+                  '',
+                  '',
+                  '',
+                  '"Boundary/Boundary.hh"',
+                  '"DataBase/DataBase.hh"',
+                  '"DataBase/State.hh"',
+                  '"Field/FieldList.hh"',
+                  '"Geometry/GeomPlane.hh"',
+                  '"Hydro/HydroFieldNames.hh"',
+                  '"KernelIntegrator/BilinearIndex.hh"',
+                  '"KernelIntegrator/FlatConnectivity.hh"',
+                  '"KernelIntegrator/IntegrationCoefficient.hh"',
+                  '"KernelIntegrator/IntegrationKernel.hh"',
+                  '"KernelIntegrator/KernelIntegrationData.hh"',
+                  '"KernelIntegrator/KernelIntegrator.hh"',
+                  '"KernelIntegrator/ManufacturedSolution.hh"',
+                  '"KernelIntegrator/RKIntegrationKernel.hh"',
+                  '"KernelIntegrator/SPHIntegrationKernel.hh"']
+
+#-------------------------------------------------------------------------------
+# Namespaces
+#-------------------------------------------------------------------------------
+PYB11namespaces = ["Spheral"]
+
+#-------------------------------------------------------------------------------
+# Instantiations
+#-------------------------------------------------------------------------------
+for ndim in dims:
+    # Dimension-dependent
+    exec('''
+BilinearIndex%(ndim)id = PYB11TemplateClass(BilinearIndex, template_parameters="Dim<%(ndim)i>")
+FlatConnectivity%(ndim)id = PYB11TemplateClass(FlatConnectivity, template_parameters="Dim<%(ndim)i>")
+IntegrationKernel%(ndim)id = PYB11TemplateClass(IntegrationKernel, template_parameters="Dim<%(ndim)i>")
+SPHIntegrationKernel%(ndim)id = PYB11TemplateClass(SPHIntegrationKernel, template_parameters="Dim<%(ndim)i>")
+KernelIntegrator%(ndim)id = PYB11TemplateClass(KernelIntegrator, template_parameters="Dim<%(ndim)i>")
+KernelIntegrationData%(ndim)id = PYB11TemplateClass(KernelIntegrationData, template_parameters="Dim<%(ndim)i>")
+ManufacturedFunction%(ndim)id = PYB11TemplateClass(ManufacturedFunction, template_parameters="Dim<%(ndim)i>")
+ManufacturedSteadyStateFunction%(ndim)id = PYB11TemplateClass(ManufacturedSteadyStateFunction, template_parameters="Dim<%(ndim)i>")
+ManufacturedConstantFunction%(ndim)id = PYB11TemplateClass(ManufacturedConstantFunction, template_parameters="Dim<%(ndim)i>")
+ManufacturedSinusoidalFunction%(ndim)id = PYB11TemplateClass(ManufacturedSinusoidalFunction, template_parameters="Dim<%(ndim)i>")
+ManufacturedWaveFunction%(ndim)id = PYB11TemplateClass(ManufacturedWaveFunction, template_parameters="Dim<%(ndim)i>")
+ManufacturedTransportSolution%(ndim)id = PYB11TemplateClass(ManufacturedTransportSolution, template_parameters="Dim<%(ndim)i>")
+''' % {"ndim" : ndim})
+
+    # Dependent on primitives
+    dim_types = (("Dim<%i>::Scalar" % ndim, "Scalar"),
+                 ("Dim<%i>::Vector" % ndim, "Vector"),
+                 ("Dim<%i>::SymTensor" % ndim, "SymTensor"),
+                 ("Dim<%i>::Tensor" % ndim, "Tensor"),
+                 ("std::vector<Dim<%i>::Scalar>" % ndim, "StdVectorScalar"),
+                 ("std::vector<Dim<%i>::Vector>" % ndim, "StdVectorVector"))
+    for (value, label) in dim_types:
+        exec('''
+%(label)sConstantIntegrationCoefficient%(ndim)id = PYB11TemplateClass(ConstantIntegrationCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s"))
+%(label)sIntegrationCoefficient%(ndim)id = PYB11TemplateClass(IntegrationCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s"))
+%(label)sFieldListIntegrationCoefficient%(ndim)id = PYB11TemplateClass(FieldListIntegrationCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s"))
+''' % {"ndim" : ndim,
+       "value" : value,
+       "label" : label})
+
+    # Dependent
on order + for order in (0, 1, 2, 3, 4, 5, 6, 7): + exec(''' +RKIntegrationKernel%(ndim)id%(order)s = PYB11TemplateClass(RKIntegrationKernel, template_parameters=("Dim<%(ndim)i>", "%(order)s")) +''' % {"ndim" : ndim, + "order" : order}) diff --git a/src/PYB11/KernelIntegrator/KernelIntegral.py b/src/PYB11/KernelIntegrator/KernelIntegral.py index d37a9583d..0f6dbd113 100644 --- a/src/PYB11/KernelIntegrator/KernelIntegral.py +++ b/src/PYB11/KernelIntegrator/KernelIntegral.py @@ -1,5 +1,24 @@ from PYB11Generator import * -from IntegrationCoefficient import * + +@PYB11template("Dimension", "CoefficientType") +@PYB11holder("std::shared_ptr") +class IntegralDependsOnCoefficient: + def pyinit(self): + "Choose coefficient for integral (coefficient defaults to one)" + + coefficient = PYB11property(doc="The coefficient", + getter = "getCoefficient", + setter = "setCoefficient") + +@PYB11template("Dimension", "CoefficientType") +@PYB11holder("std::shared_ptr") +class IntegralDependsOnFieldListCoefficient: + def pyinit(self): + "Choose coefficient for integral" + + coefficient = PYB11property(doc="The coefficient", + getter = "getCoefficient", + setter = "setCoefficient") @PYB11template("Dimension") @PYB11holder("std::shared_ptr") diff --git a/src/PYB11/KernelIntegrator/KernelIntegrator_PYB11.py b/src/PYB11/KernelIntegrator/KernelIntegrator_PYB11.py index 1967f71c3..eead96d2a 100644 --- a/src/PYB11/KernelIntegrator/KernelIntegrator_PYB11.py +++ b/src/PYB11/KernelIntegrator/KernelIntegrator_PYB11.py @@ -7,41 +7,12 @@ from spheralDimensions import * dims = spheralDimensions() -from BilinearIndex import * -from FlatConnectivity import * -from KernelIntegrationData import * -from IntegrationCoefficient import * -from IntegrationKernel import * from KernelIntegral import * -from KernelIntegrator import * -from ManufacturedSolution import * -from RKIntegrationKernel import * -from SPHIntegrationKernel import * #------------------------------------------------------------------------------- # Includes #------------------------------------------------------------------------------- -PYB11includes += ['', - '', - '', - '', - '', - '"Boundary/Boundary.hh"', - '"DataBase/DataBase.hh"', - '"DataBase/State.hh"', - '"Field/FieldList.hh"', - '"Geometry/GeomPlane.hh"', - '"Hydro/HydroFieldNames.hh"', - '"KernelIntegrator/BilinearIndex.hh"', - '"KernelIntegrator/FlatConnectivity.hh"', - '"KernelIntegrator/IntegrationCoefficient.hh"', - '"KernelIntegrator/IntegrationKernel.hh"', - '"KernelIntegrator/KernelIntegral.hh"', - '"KernelIntegrator/KernelIntegrationData.hh"', - '"KernelIntegrator/KernelIntegrator.hh"', - '"KernelIntegrator/ManufacturedSolution.hh"', - '"KernelIntegrator/RKIntegrationKernel.hh"', - '"KernelIntegrator/SPHIntegrationKernel.hh"'] +PYB11includes += ['"KernelIntegrator/KernelIntegral.hh"'] #------------------------------------------------------------------------------- # Namespaces @@ -54,12 +25,6 @@ for ndim in dims: # Dimension-dependent exec(''' -BilinearIndex%(ndim)id = PYB11TemplateClass(BilinearIndex, template_parameters="Dim<%(ndim)i>") -FlatConnectivity%(ndim)id = PYB11TemplateClass(FlatConnectivity, template_parameters="Dim<%(ndim)i>") -IntegrationKernel%(ndim)id = PYB11TemplateClass(IntegrationKernel, template_parameters="Dim<%(ndim)i>") -SPHIntegrationKernel%(ndim)id = PYB11TemplateClass(SPHIntegrationKernel, template_parameters="Dim<%(ndim)i>") -KernelIntegrator%(ndim)id = PYB11TemplateClass(KernelIntegrator, template_parameters="Dim<%(ndim)i>") -KernelIntegrationData%(ndim)id = 
PYB11TemplateClass(KernelIntegrationData, template_parameters="Dim<%(ndim)i>") KernelIntegralBase%(ndim)id = PYB11TemplateClass(KernelIntegralBase, template_parameters="Dim<%(ndim)i>") LinearKernel%(ndim)id = PYB11TemplateClass(LinearKernel, template_parameters="Dim<%(ndim)i>") LinearGrad%(ndim)id = PYB11TemplateClass(LinearGrad, template_parameters="Dim<%(ndim)i>") @@ -80,12 +45,6 @@ BilinearSurfaceNormalKernelDotGrad%(ndim)id = PYB11TemplateClass(BilinearSurfaceNormalKernelDotGrad, template_parameters="Dim<%(ndim)i>") CellCoefficient%(ndim)id = PYB11TemplateClass(CellCoefficient, template_parameters="Dim<%(ndim)i>") SurfaceNormalCoefficient%(ndim)id = PYB11TemplateClass(SurfaceNormalCoefficient, template_parameters="Dim<%(ndim)i>") -ManufacturedFunction%(ndim)id = PYB11TemplateClass(ManufacturedFunction, template_parameters="Dim<%(ndim)i>") -ManufacturedSteadyStateFunction%(ndim)id = PYB11TemplateClass(ManufacturedSteadyStateFunction, template_parameters="Dim<%(ndim)i>") -ManufacturedConstantFunction%(ndim)id = PYB11TemplateClass(ManufacturedConstantFunction, template_parameters="Dim<%(ndim)i>") -ManufacturedSinusoidalFunction%(ndim)id = PYB11TemplateClass(ManufacturedSinusoidalFunction, template_parameters="Dim<%(ndim)i>") -ManufacturedWaveFunction%(ndim)id = PYB11TemplateClass(ManufacturedWaveFunction, template_parameters="Dim<%(ndim)i>") -ManufacturedTransportSolution%(ndim)id = PYB11TemplateClass(ManufacturedTransportSolution, template_parameters="Dim<%(ndim)i>") ''' % {"ndim" : ndim}) # Dependent on primitives @@ -97,9 +56,6 @@ ("std::vector::Vector>" % ndim, "StdVectorVector")) for (value, label) in dim_types: exec(''' -%(label)sConstantIntegrationCoefficient%(ndim)id = PYB11TemplateClass(ConstantIntegrationCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s")) -%(label)sIntegrationCoefficient%(ndim)id = PYB11TemplateClass(IntegrationCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s")) -%(label)sFieldListIntegrationCoefficient%(ndim)id = PYB11TemplateClass(FieldListIntegrationCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s")) %(label)sIntegralDependsOnCoefficient%(ndim)id = PYB11TemplateClass(IntegralDependsOnCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s")) %(label)sIntegralDependsOnFieldListCoefficient%(ndim)id = PYB11TemplateClass(IntegralDependsOnFieldListCoefficient, template_parameters=("Dim<%(ndim)i>", "%(value)s")) %(label)sLinearIntegral%(ndim)id = PYB11TemplateClass(LinearIntegral, template_parameters=("Dim<%(ndim)i>", "%(value)s")) @@ -130,9 +86,3 @@ "value" : value, "label" : label}) - # Dependent on order - for order in (0, 1, 2, 3, 4, 5, 6, 7): - exec(''' -RKIntegrationKernel%(ndim)id%(order)s = PYB11TemplateClass(RKIntegrationKernel, template_parameters=("Dim<%(ndim)i>", "%(order)s")) -''' % {"ndim" : ndim, - "order" : order}) diff --git a/src/PYB11/Material/CMakeLists.txt b/src/PYB11/Material/CMakeLists.txt index 6e8e716bf..bf5451e39 100644 --- a/src/PYB11/Material/CMakeLists.txt +++ b/src/PYB11/Material/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Material) +spheral_add_pybind11_library(Material SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Mesh/CMakeLists.txt b/src/PYB11/Mesh/CMakeLists.txt index c540ead3e..dc506f232 100644 --- a/src/PYB11/Mesh/CMakeLists.txt +++ b/src/PYB11/Mesh/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Mesh) +spheral_add_pybind11_library(Mesh SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Neighbor/CMakeLists.txt b/src/PYB11/Neighbor/CMakeLists.txt index 593c0eb1c..5e8f1400f 
100644
--- a/src/PYB11/Neighbor/CMakeLists.txt
+++ b/src/PYB11/Neighbor/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(Neighbor)
+spheral_add_pybind11_library(Neighbor SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/NodeGenerators/CMakeLists.txt b/src/PYB11/NodeGenerators/CMakeLists.txt
index a290129cb..2503281b1 100644
--- a/src/PYB11/NodeGenerators/CMakeLists.txt
+++ b/src/PYB11/NodeGenerators/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(NodeGenerators)
+spheral_add_pybind11_library(NodeGenerators SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/NodeList/CMakeLists.txt b/src/PYB11/NodeList/CMakeLists.txt
index 20df18b4d..55484b8d9 100644
--- a/src/PYB11/NodeList/CMakeLists.txt
+++ b/src/PYB11/NodeList/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(NodeList)
+spheral_add_pybind11_library(NodeList SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/OpenMP/CMakeLists.txt b/src/PYB11/OpenMP/CMakeLists.txt
index 7330e8208..a704dde86 100644
--- a/src/PYB11/OpenMP/CMakeLists.txt
+++ b/src/PYB11/OpenMP/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(OpenMP)
+spheral_add_pybind11_library(OpenMP SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/Physics/CMakeLists.txt b/src/PYB11/Physics/CMakeLists.txt
index 41877686b..61f57969d 100644
--- a/src/PYB11/Physics/CMakeLists.txt
+++ b/src/PYB11/Physics/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(Physics)
+spheral_add_pybind11_library(Physics SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/Porosity/CMakeLists.txt b/src/PYB11/Porosity/CMakeLists.txt
index 2e6b33f04..7297d43ea 100644
--- a/src/PYB11/Porosity/CMakeLists.txt
+++ b/src/PYB11/Porosity/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(Porosity)
+spheral_add_pybind11_library(Porosity SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/RK/CMakeLists.txt b/src/PYB11/RK/CMakeLists.txt
index 47390c663..93ae0df86 100644
--- a/src/PYB11/RK/CMakeLists.txt
+++ b/src/PYB11/RK/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(RK)
+spheral_add_pybind11_library(RK SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/SPH/CMakeLists.txt b/src/PYB11/SPH/CMakeLists.txt
index 0edd0012f..86faf08d8 100644
--- a/src/PYB11/SPH/CMakeLists.txt
+++ b/src/PYB11/SPH/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(SPH)
+spheral_add_pybind11_library(SPH SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/SVPH/CMakeLists.txt b/src/PYB11/SVPH/CMakeLists.txt
index 266df8232..bd78ae13c 100644
--- a/src/PYB11/SVPH/CMakeLists.txt
+++ b/src/PYB11/SVPH/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(SVPH)
+spheral_add_pybind11_library(SVPH SPHERAL_MODULE_LIST)
diff --git a/src/PYB11/Silo/CMakeLists.txt b/src/PYB11/Silo/CMakeLists.txt
index c82e3adf6..4749870b0 100644
--- a/src/PYB11/Silo/CMakeLists.txt
+++ b/src/PYB11/Silo/CMakeLists.txt
@@ -1,8 +1,8 @@
 # Ignore -Wterminate warning that is thrown from DBC.hh VERIFY2
 if (${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
   set_source_files_properties(SpheralSilo.cc PROPERTIES COMPILE_FLAGS -Wno-terminate)
 endif()
 
-spheral_add_pybind11_library(Silo
-                             INCLUDES ${SPHERAL_ROOT_DIR}/src/Pybind11Wraps/Silo)
+spheral_add_pybind11_library(Silo SPHERAL_MODULE_LIST
+                             INCLUDES ${SPHERAL_ROOT_DIR}/src/Pybind11Wraps/Silo)
diff --git a/src/PYB11/SolidMaterial/CMakeLists.txt b/src/PYB11/SolidMaterial/CMakeLists.txt
index 592b32f37..d6c07c99b 100644
--- a/src/PYB11/SolidMaterial/CMakeLists.txt
+++ b/src/PYB11/SolidMaterial/CMakeLists.txt
@@ -1 +1 @@
-spheral_add_pybind11_library(SolidMaterial) +spheral_add_pybind11_library(SolidMaterial SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Strength/CMakeLists.txt b/src/PYB11/Strength/CMakeLists.txt index 5b9bcb531..3ba24b1ac 100644 --- a/src/PYB11/Strength/CMakeLists.txt +++ b/src/PYB11/Strength/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Strength) +spheral_add_pybind11_library(Strength SPHERAL_MODULE_LIST) diff --git a/src/PYB11/Utilities/CMakeLists.txt b/src/PYB11/Utilities/CMakeLists.txt index 0725a4c04..e6fae8760 100644 --- a/src/PYB11/Utilities/CMakeLists.txt +++ b/src/PYB11/Utilities/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(Utilities) +spheral_add_pybind11_library(Utilities SPHERAL_MODULE_LIST) diff --git a/src/PYB11/polytope/CMakeLists.txt b/src/PYB11/polytope/CMakeLists.txt index 5098ef6f3..d999c1837 100644 --- a/src/PYB11/polytope/CMakeLists.txt +++ b/src/PYB11/polytope/CMakeLists.txt @@ -1 +1 @@ -spheral_add_pybind11_library(polytope) +spheral_add_pybind11_library(polytope SPHERAL_MODULE_LIST) diff --git a/src/Porosity/PalphaPorosity.cc b/src/Porosity/PalphaPorosity.cc index c4a3e6d55..99f3736d4 100644 --- a/src/Porosity/PalphaPorosity.cc +++ b/src/Porosity/PalphaPorosity.cc @@ -109,9 +109,6 @@ PalphaPorosity(const SolidNodeList& nodeList, for (auto i = 0u; i < n; ++i) { mc0[i] = c0[i]; } - const auto alpha0_min = mAlpha0.min(); - VERIFY2((1.0 <= mAlphae) and (mAlphae <= mAlphat) and (mAlphat <= alpha0_min), - "PalphaPorosity input ERROR : require 1.0 <= alphae <= alphat <= alpha0, (alphae, alphat, alpha0) = " << mAlphae << ", " << mAlphat << ", " << alpha0_min); } } diff --git a/src/SPH/SPHHydroBase.cc b/src/SPH/SPHHydroBase.cc index 13e0c3e6f..0a9c5f6a2 100644 --- a/src/SPH/SPHHydroBase.cc +++ b/src/SPH/SPHHydroBase.cc @@ -843,15 +843,16 @@ evaluateDerivatives(const typename Dimension::Scalar time, const auto etaj = Hj*rij; const auto etaMagi = etai.magnitude(); const auto etaMagj = etaj.magnitude(); - const auto etaUnit = etai*safeInvVar(etaMagi); + const auto etaiUnit = etai*safeInvVar(etaMagi); + const auto etajUnit = etaj*safeInvVar(etaMagj); CHECK(etaMagi >= 0.0); CHECK(etaMagj >= 0.0); // Symmetrized kernel weight and gradient. W.kernelAndGradValue(etaMagi, Hdeti, Wi, gWi); W.kernelAndGradValue(etaMagj, Hdetj, Wj, gWj); - gradWi = gWi*Hi*etaUnit; - gradWj = gWj*Hj*etaUnit; + gradWi = gWi*Hi*etaiUnit; + gradWj = gWj*Hj*etajUnit; if (oneKernel) { WQi = Wi; WQj = Wj; @@ -860,8 +861,8 @@ evaluateDerivatives(const typename Dimension::Scalar time, } else { WQ.kernelAndGradValue(etaMagi, Hdeti, WQi, gWQi); WQ.kernelAndGradValue(etaMagj, Hdetj, WQj, gWQj); - gradWQi = gWQi*Hi*etaUnit; - gradWQj = gWQj*Hj*etaUnit; + gradWQi = gWQi*Hi*etaiUnit; + gradWQj = gWQj*Hj*etajUnit; } // Zero'th and second moment of the node distribution -- used for the diff --git a/src/SPH/SPHHydroBaseRZ.cc b/src/SPH/SPHHydroBaseRZ.cc index c9789551b..59dc54262 100644 --- a/src/SPH/SPHHydroBaseRZ.cc +++ b/src/SPH/SPHHydroBaseRZ.cc @@ -392,15 +392,16 @@ evaluateDerivatives(const Dim<2>::Scalar /*time*/, const auto etaj = Hj*xij; const auto etaMagi = etai.magnitude(); const auto etaMagj = etaj.magnitude(); - const auto etaUnit = etai*safeInvVar(etaMagi); + const auto etaiUnit = etai*safeInvVar(etaMagi); + const auto etajUnit = etaj*safeInvVar(etaMagj); CHECK(etaMagi >= 0.0); CHECK(etaMagj >= 0.0); // Symmetrized kernel weight and gradient. 
W.kernelAndGradValue(etaMagi, Hdeti, Wi, gWi); W.kernelAndGradValue(etaMagj, Hdetj, Wj, gWj); - gradWi = gWi*Hi*etaUnit; - gradWj = gWj*Hj*etaUnit; + gradWi = gWi*Hi*etaiUnit; + gradWj = gWj*Hj*etajUnit; if (oneKernel) { WQi = Wi; WQj = Wj; @@ -409,8 +410,8 @@ evaluateDerivatives(const Dim<2>::Scalar /*time*/, } else { WQ.kernelAndGradValue(etaMagi, Hdeti, WQi, gWQi); WQ.kernelAndGradValue(etaMagj, Hdetj, WQj, gWQj); - gradWQi = gWQi*Hi*etaUnit; - gradWQj = gWQj*Hj*etaUnit; + gradWQi = gWQi*Hi*etaiUnit; + gradWQj = gWQj*Hj*etajUnit; } // Zero'th and second moment of the node distribution -- used for the diff --git a/src/SPH/SPHHydros.py b/src/SPH/SPHHydros.py index 0a7a527cd..cfe8daa0c 100644 --- a/src/SPH/SPHHydros.py +++ b/src/SPH/SPHHydros.py @@ -29,7 +29,8 @@ def SPH(W, xmin = (-1e100, -1e100, -1e100), xmax = ( 1e100, 1e100, 1e100), etaMinAxis = 0.1, - ASPH = False): + ASPH = False, + smoothingScaleMethod = None): # Check if we're running solid or fluid hydro nfluid = dataBase.numFluidNodeLists @@ -86,16 +87,11 @@ def SPH(W, Q = eval("LimitedMonaghanGingoldViscosity%id(Clinear=%g, Cquadratic=%g)" % (ndim, Cl, Cq)) # Smoothing scale update - if ASPH: - smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) - else: - smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) - - # Smoothing scale update - if ASPH: - smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) - else: - smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) + if smoothingScaleMethod is None: + if ASPH: + smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim) + else: + smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim) # Build the constructor arguments xmin = (ndim,) + xmin diff --git a/src/SPH/SolidSPHHydroBase.cc b/src/SPH/SolidSPHHydroBase.cc index 72a1164cc..2295f311a 100644 --- a/src/SPH/SolidSPHHydroBase.cc +++ b/src/SPH/SolidSPHHydroBase.cc @@ -548,15 +548,16 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, const auto etaj = Hj*rij; const auto etaMagi = etai.magnitude(); const auto etaMagj = etaj.magnitude(); - const auto etaUnit = etai*safeInvVar(etaMagi); + const auto etaiUnit = etai*safeInvVar(etaMagi); + const auto etajUnit = etaj*safeInvVar(etaMagj); CHECK(etaMagi >= 0.0); CHECK(etaMagj >= 0.0); // Symmetrized kernel weight and gradient. 
W.kernelAndGradValue(etaMagi, Hdeti, Wi, gWi); W.kernelAndGradValue(etaMagj, Hdetj, Wj, gWj); - gradWi = gWi*Hi*etaUnit; - gradWj = gWj*Hj*etaUnit; + gradWi = gWi*Hi*etaiUnit; + gradWj = gWj*Hj*etajUnit; if (oneKernelQ) { WQi = Wi; WQj = Wj; @@ -565,15 +566,15 @@ evaluateDerivatives(const typename Dimension::Scalar /*time*/, } else { WQ.kernelAndGradValue(etaMagi, Hdeti, WQi, gWQi); WQ.kernelAndGradValue(etaMagj, Hdetj, WQj, gWQj); - gradWQi = gWQi*Hi*etaUnit; - gradWQj = gWQj*Hj*etaUnit; + gradWQi = gWQi*Hi*etaiUnit; + gradWQj = gWQj*Hj*etajUnit; } if (oneKernelG) { gradWGi = gradWi; gradWGj = gradWj; } else { - gradWGi = Hi*etaUnit * WG.gradValue(etaMagi, Hdeti); - gradWGj = Hj*etaUnit * WG.gradValue(etaMagj, Hdetj); + gradWGi = Hi*etaiUnit * WG.gradValue(etaMagi, Hdeti); + gradWGj = Hj*etajUnit * WG.gradValue(etaMagj, Hdetj); } // Zero'th and second moment of the node distribution -- used for the diff --git a/src/SPH/SolidSPHHydroBaseRZ.cc b/src/SPH/SolidSPHHydroBaseRZ.cc index f03222aad..3757aa5a2 100644 --- a/src/SPH/SolidSPHHydroBaseRZ.cc +++ b/src/SPH/SolidSPHHydroBaseRZ.cc @@ -469,15 +469,16 @@ evaluateDerivatives(const Dim<2>::Scalar /*time*/, const auto etaj = Hj*xij; const auto etaMagi = etai.magnitude(); const auto etaMagj = etaj.magnitude(); - const auto etaUnit = etai*safeInvVar(etaMagi); + const auto etaiUnit = etai*safeInvVar(etaMagi); + const auto etajUnit = etaj*safeInvVar(etaMagj); CHECK(etaMagi >= 0.0); CHECK(etaMagj >= 0.0); // Symmetrized kernel weight and gradient. W.kernelAndGradValue(etaMagi, Hdeti, Wi, gWi); W.kernelAndGradValue(etaMagj, Hdetj, Wj, gWj); - gradWi = gWi*Hi*etaUnit; - gradWj = gWj*Hj*etaUnit; + gradWi = gWi*Hi*etaiUnit; + gradWj = gWj*Hj*etajUnit; if (oneKernelQ) { WQi = Wi; WQj = Wj; @@ -486,15 +487,15 @@ evaluateDerivatives(const Dim<2>::Scalar /*time*/, } else { WQ.kernelAndGradValue(etaMagi, Hdeti, WQi, gWQi); WQ.kernelAndGradValue(etaMagj, Hdetj, WQj, gWQj); - gradWQi = gWQi*Hi*etaUnit; - gradWQj = gWQj*Hj*etaUnit; + gradWQi = gWQi*Hi*etaiUnit; + gradWQj = gWQj*Hj*etajUnit; } if (oneKernelG) { gradWGi = gradWi; gradWGj = gradWj; } else { - gradWGi = Hi*etaUnit * WG.gradValue(etaMagi, Hdeti); - gradWGj = Hj*etaUnit * WG.gradValue(etaMagj, Hdetj); + gradWGi = Hi*etaiUnit * WG.gradValue(etaMagi, Hdeti); + gradWGj = Hj*etajUnit * WG.gradValue(etaMagj, Hdetj); } // Determine how we're applying damage. 
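
The SPHHydros.py hunk above makes the smoothing-scale algorithm an explicit, user-overridable keyword of the SPH() factory instead of something derived only from the ASPH flag. Below is a minimal sketch of how a run script might use it; it assumes a DataBase `db` already populated with fluid NodeLists, and the keyword names plus the SPHSmoothingScale3d / ASPHSmoothingScale3d class names are inferred from the factory code shown above, so treat the exact spellings as assumptions rather than documented API.

    # Sketch only: `db` is assumed to be a DataBase set up in the usual way.
    from Spheral3d import *

    WT = TableKernel(WendlandC2Kernel(), 1000)

    # Default behavior (unchanged): with smoothingScaleMethod left at None the
    # factory builds SPHSmoothingScale3d(), or ASPHSmoothingScale3d() when ASPH=True.
    hydro = SPH(dataBase = db, W = WT, ASPH = True)

    # New option: hand the factory a specific smoothing scale algorithm directly.
    hydro = SPH(dataBase = db,
                W = WT,
                smoothingScaleMethod = ASPHSmoothingScale3d())

Leaving smoothingScaleMethod at its default of None preserves the old selection logic, so existing run scripts are unaffected.
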
diff --git a/tests/functional/DEM/DEMTests.ats b/tests/functional/DEM/DEMTests.ats index d2204d41e..bcb3ff032 100644 --- a/tests/functional/DEM/DEMTests.ats +++ b/tests/functional/DEM/DEMTests.ats @@ -10,6 +10,10 @@ source("LinearSpringDEM/TwoParticleCollision/fiveParticleCollision-2d.py") source("LinearSpringDEM/ImpactingSquares/impactingSquares-2d.py") source("LinearSpringDEM/ImpactingSquares/impactingSquares-3d.py") -# test restitution coefficient and friction against solid boundaries +# test restitution coefficient, natural frequency, and friction against solid boundaries source("LinearSpringDEM/SolidBoundaryCondition/singleParticleBoundaryCollision-3d.py") source("LinearSpringDEM/SolidBoundaryCondition/finiteSolidPlanes-3d.py") +source("LinearSpringDEM/SolidBoundaryCondition/pureTorsion-3d.py") + +# test moving solid boundaries +source("LinearSpringDEM/MovingSolidBoundaries/singleParticleMovingBoundaryCollision-3d.py") \ No newline at end of file diff --git a/tests/functional/DEM/LinearSpringDEM/ImpactingSquares/impactingSquares-3d.py b/tests/functional/DEM/LinearSpringDEM/ImpactingSquares/impactingSquares-3d.py index 732f513b8..49640ab04 100644 --- a/tests/functional/DEM/LinearSpringDEM/ImpactingSquares/impactingSquares-3d.py +++ b/tests/functional/DEM/LinearSpringDEM/ImpactingSquares/impactingSquares-3d.py @@ -16,6 +16,8 @@ else: from DistributeNodes import distributeNodes3d +random.seed(0) + title("DEM 3d Impacting Squares") # This tests the conservation properties of the DEM package when # distribution across multiple processors diff --git a/tests/functional/DEM/LinearSpringDEM/MovingSolidBoundaries/rotatingDrum-2d.py b/tests/functional/DEM/LinearSpringDEM/MovingSolidBoundaries/rotatingDrum-2d.py new file mode 100644 index 000000000..3503c6352 --- /dev/null +++ b/tests/functional/DEM/LinearSpringDEM/MovingSolidBoundaries/rotatingDrum-2d.py @@ -0,0 +1,278 @@ +import os, shutil, mpi +from math import * +from Spheral2d import * +from SpheralTestUtilities import * +from findLastRestart import * +from GenerateNodeDistribution2d import * +from GenerateDEMfromSPHGenerator import GenerateDEMfromSPHGenerator2d + +if mpi.procs > 1: + from PeanoHilbertDistributeNodes import distributeNodes2d +else: + from DistributeNodes import distributeNodes2d + +#------------------------------------------------------------------------------- +# Generic problem parameters +#------------------------------------------------------------------------------- +commandLine(omegaDrum = 0.25, # angular velocity of drum + radiusDrum = 10.0, # radius of the drum + yThresh = 0.2, # level to initial fill to + yClip = 0.0, # level to clip at after settling + g0 = 5.0, # grav acceleration + + radius = 0.5, # particle radius + normalSpringConstant=10000.0, # spring constant for LDS model + normalRestitutionCoefficient=0.55, # restitution coefficient to get damping const + tangentialSpringConstant=2857.0, # spring constant for LDS model + tangentialRestitutionCoefficient=0.55, # restitution coefficient to get damping const + dynamicFriction = 1.0, # static friction coefficient sliding + staticFriction = 1.0, # dynamic friction coefficient sliding + rollingFriction = 1.05, # static friction coefficient for rolling + torsionalFriction = 1.3, # static friction coefficient for torsion + cohesiveTensileStrength = 0.0, # units of pressure + shapeFactor = 0.1, # in [0,1] shape factor from Zhang 2018, 0 - no torsion or rolling + + neighborSearchBuffer = 0.1, # multiplicative buffer to radius for neighbor search algo + + # 
integration + IntegratorConstructor = VerletIntegrator, # Verlet one integrator to garenteee conservation + stepsPerCollision = 50, # replaces CFL for DEM + updateBoundaryFrequency = 10, # CAREFUL: make sure fast time stepping is off for DEM + settleTime = 5.0, # time simulated before we start spinning + goalTime = 15.0, # duration of spin cycle + dt = 1e-8, + dtMin = 1.0e-8, + dtMax = 0.1, + dtGrowth = 2.0, + steps = None, + maxSteps = None, + statsStep = 10, + domainIndependent = False, + rigorousBoundaries = False, + dtverbose = False, + + # output control + vizCycle = None, + vizTime = 0.1, + clearDirectories = False, + restoreCycle = None, + restartStep = 1000, + redistributeStep = 500, + dataDir = "dumps-DEM-rotating-drum-2d", + + # ats parameters + boolCheckRestitutionCoefficient=False, # turn on error checking for restitution coefficient + boolCheckSlidingFrictionX=False, # checks sliding friction reduces relative rotation + boolCheckSlidingFrictionY=False, # checks rolling friction reduces relative rotation + boolCheckTorsionalFriction=False, # checks torsional friction reduces relative rotation + restitutionErrorThreshold = 0.02, # relative error actual restitution vs nominal + omegaThreshold = 1e-14, # theshold for perpendicular components that should stay zero + ) + +#------------------------------------------------------------------------------- +# check for bad inputs +#------------------------------------------------------------------------------- +assert radius < radiusDrum/2.0 +assert shapeFactor <= 1.0 and shapeFactor >= 0.0 +assert dynamicFriction >= 0.0 +assert staticFriction >= 0.0 +assert torsionalFriction >= 0.0 +assert rollingFriction >= 0.0 +assert cohesiveTensileStrength >= 0.0 + +#------------------------------------------------------------------------------- +# file things +#------------------------------------------------------------------------------- +testName = "DEM-RotatingDrum-2d" + +dataDir = os.path.join(dataDir, + "restitutionCoefficient=%s" % normalRestitutionCoefficient) + +restartDir = os.path.join(dataDir, "restarts") +vizDir = os.path.join(dataDir, "visit") +restartBaseName = os.path.join(restartDir, testName) +vizBaseName = testName + +if vizCycle is None and vizTime is None: + vizBaseName=None + +#------------------------------------------------------------------------------- +# Check if the necessary output directories exist. If not, create them. +#------------------------------------------------------------------------------- +if mpi.rank == 0: + if clearDirectories and os.path.exists(dataDir): + shutil.rmtree(dataDir) + if not os.path.exists(restartDir): + os.makedirs(restartDir) + if not os.path.exists(vizDir): + os.makedirs(vizDir) +mpi.barrier() + +#------------------------------------------------------------------------------- +# If we're restarting, find the set of most recent restart files. +#------------------------------------------------------------------------------- +if restoreCycle is None: + restoreCycle = findLastRestart(restartBaseName) + +#------------------------------------------------------------------------------- +# This doesn't really matter kernel filler for neighbor algo +#------------------------------------------------------------------------------- +WT = TableKernel(WendlandC2Kernel(), 1000) + +#------------------------------------------------------------------------------- +# Make the NodeList. 
+#------------------------------------------------------------------------------- +units = CGuS() +nodes1 = makeDEMNodeList("nodeList1", + hmin = 1.0e-30, + hmax = 1.0e30, + hminratio = 100.0, + neighborSearchBuffer = neighborSearchBuffer, + kernelExtent = WT.kernelExtent) +nodeSet = [nodes1] +for nodes in nodeSet: + output("nodes.name") + output("nodes.hmin") + output("nodes.hmax") + output("nodes.hminratio") + output("nodes.nodesPerSmoothingScale") + +#------------------------------------------------------------------------------- +# Set the node properties. (gen 2 particles visit doesn't like just one) +#------------------------------------------------------------------------------- +def rejecter(x,y,m,H): + xnew,ynew,mnew,Hnew = [],[],[],[] + for i in range(len(x)): + if y[i] < yThresh and sqrt(x[i]**2+y[i]**2) < radiusDrum - 1.10*radius: + xnew.append(x[i]) + ynew.append(y[i]) + mnew.append(m[i]) + Hnew.append(H[i]) + + return xnew,ynew,mnew,Hnew + +nx = int(2*radiusDrum / (radius * 1.02)) +ny = nx + +generator0 = GenerateNodeDistribution2d(nx, ny, + rho = 1.0, + distributionType = "lattice", + xmin = (-radiusDrum, -radiusDrum), + xmax = ( radiusDrum, radiusDrum), + rejecter=rejecter) + +generator1 = GenerateDEMfromSPHGenerator2d(WT, + generator0) +distributeNodes2d((nodes1, generator1)) + +#------------------------------------------------------------------------------- +# Construct a DataBase to hold our node list +#------------------------------------------------------------------------------- +db = DataBase() +output("db") +for nodes in nodeSet: + db.appendNodeList(nodes) +output("db.numNodeLists") +output("db.numDEMNodeLists") +output("db.numFluidNodeLists") + + +#------------------------------------------------------------------------------- +# Physics Package: DEM +#------------------------------------------------------------------------------- +dem = DEM(db, + normalSpringConstant = normalSpringConstant, + normalRestitutionCoefficient = normalRestitutionCoefficient, + tangentialSpringConstant = tangentialSpringConstant, + tangentialRestitutionCoefficient = tangentialRestitutionCoefficient, + dynamicFrictionCoefficient = dynamicFriction, + staticFrictionCoefficient = staticFriction, + rollingFrictionCoefficient = rollingFriction, + torsionalFrictionCoefficient = torsionalFriction, + cohesiveTensileStrength =cohesiveTensileStrength, + shapeFactor = shapeFactor, + stepsPerCollision = stepsPerCollision, + enableFastTimeStepping = False) + +packages = [dem] + +solidWall = SphereSolidBoundary(center = Vector(0.0, 0.0), + radius = radiusDrum, + angularVelocity = 0.0) + +dem.appendSolidBoundary(solidWall) + +#------------------------------------------------------------------------------- +# Gravity: DEM +#------------------------------------------------------------------------------- +gravity = ConstantAcceleration(a0 = Vector(0.0,-g0), + nodeList = nodes1) +packages += [gravity] + +#------------------------------------------------------------------------------- +# initial conditions +#------------------------------------------------------------------------------- +# velocity = nodes1.velocity() +# particleRadius = nodes1.particleRadius() + +# velocity[0] = Vector(0.0,0.0,-vImpact) +# particleRadius[0] = radius + +#------------------------------------------------------------------------------- +# Construct a time integrator, and add the physics packages. 
+#------------------------------------------------------------------------------- +integrator = IntegratorConstructor(db) +for p in packages: + integrator.appendPhysicsPackage(p) +integrator.lastDt = dt +integrator.dtMin = dtMin +integrator.dtMax = dtMax +integrator.dtGrowth = dtGrowth +integrator.domainDecompositionIndependent = domainIndependent +integrator.verbose = dtverbose +integrator.rigorousBoundaries = rigorousBoundaries +integrator.updateBoundaryFrequency = 10 +integrator.cullGhostNodes = False + +output("integrator") +output("integrator.havePhysicsPackage(dem)") +output("integrator.lastDt") +output("integrator.dtMin") +output("integrator.dtMax") +output("integrator.dtGrowth") +output("integrator.domainDecompositionIndependent") +output("integrator.rigorousBoundaries") +output("integrator.verbose") + +#------------------------------------------------------------------------------- +# Make the problem controller. +#------------------------------------------------------------------------------- +from SpheralPointmeshSiloDump import dumpPhysicsState +control = SpheralController(integrator, WT, + iterateInitialH = False, + initializeDerivatives = True, + statsStep = statsStep, + restartStep = restartStep, + restartBaseName = restartBaseName, + restoreCycle = restoreCycle, + vizBaseName = vizBaseName, + vizMethod=dumpPhysicsState, + vizDir = vizDir, + vizStep = vizCycle, + vizTime = vizTime) +output("control") + +#------------------------------------------------------------------------------- +# Advance to the end time. +#------------------------------------------------------------------------------- +if not steps is None: + control.step(steps) +else: + control.advance(settleTime, maxSteps) + +solidWall.angularVelocity = omegaDrum + +if not steps is None: + control.step(steps) +else: + control.advance(settleTime+goalTime, maxSteps) diff --git a/tests/functional/DEM/LinearSpringDEM/MovingSolidBoundaries/singleParticleMovingBoundaryCollision-3d.py b/tests/functional/DEM/LinearSpringDEM/MovingSolidBoundaries/singleParticleMovingBoundaryCollision-3d.py new file mode 100644 index 000000000..22a4e0f39 --- /dev/null +++ b/tests/functional/DEM/LinearSpringDEM/MovingSolidBoundaries/singleParticleMovingBoundaryCollision-3d.py @@ -0,0 +1,265 @@ +#ATS:DEM3dMSPBC1 = test( SELF, "--clearDirectories True --boolCheckRestitutionCoefficient True --normalRestitutionCoefficient 1.0 --steps 100", label="DEM perfectly elastic collision with solid boundary -- 3-D (serial)") +#ATS:DEM3dMSPBC2 = test( SELF, "--clearDirectories True --boolCheckRestitutionCoefficient True --normalRestitutionCoefficient 0.5 --steps 100", label="DEM inelastic collision with solid boundary -- 3-D (serial)") + +import os, shutil, mpi +from math import * +from Spheral3d import * +from SpheralTestUtilities import * +from findLastRestart import * +from GenerateNodeDistribution3d import * +from GenerateDEMfromSPHGenerator import GenerateDEMfromSPHGenerator3d + +if mpi.procs > 1: + from PeanoHilbertDistributeNodes import distributeNodes3d +else: + from DistributeNodes import distributeNodes3d + +title("DEM Moving Boundary Restitution Coefficient Test") + +#------------------------------------------------------------------------------- +# Generic problem parameters +#------------------------------------------------------------------------------- +commandLine(vImpact = 1.0, # impact velocity (negative z direction) + vbc = 0.2, # velocity of bc (positive z direction) + + h0 = 1.00, # initial height above the solid bc plane + radius = 0.95, 
# particle radius + normalSpringConstant=10000.0, # spring constant for LDS model + normalRestitutionCoefficient=1.00, # restitution coefficient to get damping const + tangentialSpringConstant=2857.0, # spring constant for LDS model + tangentialRestitutionCoefficient=0.55, # restitution coefficient to get damping const + dynamicFriction = 1.0, # static friction coefficient sliding + staticFriction = 1.0, # dynamic friction coefficient sliding + rollingFriction = 1.05, # static friction coefficient for rolling + torsionalFriction = 1.3, # static friction coefficient for torsion + cohesiveTensileStrength = 0.0, # units of pressure + shapeFactor = 0.5, # in [0,1] shape factor from Zhang 2018, 0 - no torsion or rolling + + neighborSearchBuffer = 0.1, # multiplicative buffer to radius for neighbor search algo + + # integration + IntegratorConstructor = VerletIntegrator, # Verlet one integrator to garenteee conservation + stepsPerCollision = 50, # replaces CFL for DEM + goalTime = 3.0, + dt = 1e-8, + dtMin = 1.0e-8, + dtMax = 0.1, + dtGrowth = 2.0, + steps = None, + maxSteps = None, + statsStep = 10, + domainIndependent = False, + rigorousBoundaries = False, + dtverbose = False, + + # output control + vizCycle = None, + vizTime = 0.1, + clearDirectories = False, + restoreCycle = None, + restartStep = 1000, + redistributeStep = 500, + dataDir = "dumps-DEM-particle-boundary-3d", + + # ats parameters + boolCheckRestitutionCoefficient=False, # turn on error checking for restitution coefficient + boolCheckSlidingFrictionX=False, # checks sliding friction reduces relative rotation + boolCheckSlidingFrictionY=False, # checks rolling friction reduces relative rotation + boolCheckTorsionalFriction=False, # checks torsional friction reduces relative rotation + restitutionErrorThreshold = 0.02, # relative error actual restitution vs nominal + omegaThreshold = 1e-14, # theshold for perpendicular components that should stay zero + ) + +#------------------------------------------------------------------------------- +# check for bad inputs +#------------------------------------------------------------------------------- +assert mpi.procs == 1 +assert h0 > radius +assert shapeFactor <= 1.0 and shapeFactor >= 0.0 +assert dynamicFriction >= 0.0 +assert staticFriction >= 0.0 +assert torsionalFriction >= 0.0 +assert rollingFriction >= 0.0 +assert cohesiveTensileStrength >= 0.0 + +#------------------------------------------------------------------------------- +# file things +#------------------------------------------------------------------------------- +testName = "DEM-SingleParticleMovingBoundaryCollision-3d" + +dataDir = os.path.join(dataDir, + "restitutionCoefficient=%s" % normalRestitutionCoefficient) + +restartDir = os.path.join(dataDir, "restarts") +vizDir = os.path.join(dataDir, "visit") +restartBaseName = os.path.join(restartDir, testName) +vizBaseName = testName + +if vizCycle is None and vizTime is None: + vizBaseName=None + +#------------------------------------------------------------------------------- +# Check if the necessary output directories exist. If not, create them. 
+#------------------------------------------------------------------------------- +if mpi.rank == 0: + if clearDirectories and os.path.exists(dataDir): + shutil.rmtree(dataDir) + if not os.path.exists(restartDir): + os.makedirs(restartDir) + if not os.path.exists(vizDir): + os.makedirs(vizDir) +mpi.barrier() + +#------------------------------------------------------------------------------- +# If we're restarting, find the set of most recent restart files. +#------------------------------------------------------------------------------- +if restoreCycle is None: + restoreCycle = findLastRestart(restartBaseName) + +#------------------------------------------------------------------------------- +# This doesn't really matter kernel filler for neighbor algo +#------------------------------------------------------------------------------- +WT = TableKernel(WendlandC2Kernel(), 1000) + +#------------------------------------------------------------------------------- +# Make the NodeList. +#------------------------------------------------------------------------------- +units = CGuS() +nodes1 = makeDEMNodeList("nodeList1", + hmin = 1.0e-30, + hmax = 1.0e30, + hminratio = 100.0, + neighborSearchBuffer = neighborSearchBuffer, + kernelExtent = WT.kernelExtent) +nodeSet = [nodes1] +for nodes in nodeSet: + output("nodes.name") + output("nodes.hmin") + output("nodes.hmax") + output("nodes.hminratio") + output("nodes.nodesPerSmoothingScale") + +#------------------------------------------------------------------------------- +# Set the node properties. (gen 2 particles visit doesn't like just one) +#------------------------------------------------------------------------------- +generator0 = GenerateNodeDistribution3d(1, 1, 1, + rho = 1.0, + distributionType = "lattice", + xmin = (-1.0, -1.0, -1+h0), + xmax = (1.0, 1.0, 1+h0)) + +generator1 = GenerateDEMfromSPHGenerator3d(WT, + generator0) +distributeNodes3d((nodes1, generator1)) + +#------------------------------------------------------------------------------- +# Construct a DataBase to hold our node list +#------------------------------------------------------------------------------- +db = DataBase() +output("db") +for nodes in nodeSet: + db.appendNodeList(nodes) +output("db.numNodeLists") +output("db.numDEMNodeLists") +output("db.numFluidNodeLists") + + +#------------------------------------------------------------------------------- +# DEM +#------------------------------------------------------------------------------- +dem = DEM(db, + normalSpringConstant = normalSpringConstant, + normalRestitutionCoefficient = normalRestitutionCoefficient, + tangentialSpringConstant = tangentialSpringConstant, + tangentialRestitutionCoefficient = tangentialRestitutionCoefficient, + dynamicFrictionCoefficient = dynamicFriction, + staticFrictionCoefficient = staticFriction, + rollingFrictionCoefficient = rollingFriction, + torsionalFrictionCoefficient = torsionalFriction, + cohesiveTensileStrength =cohesiveTensileStrength, + shapeFactor = shapeFactor, + stepsPerCollision = stepsPerCollision) + +packages = [dem] + +# set the solid bcs and add to dem package +solidWall = InfinitePlaneSolidBoundary(Vector(0.0, 0.0, 0.0), Vector( 0.0, 0.0, 1.0)) +solidWall.velocity = Vector(0.0,0.0,vbc) + +dem.appendSolidBoundary(solidWall) + +#------------------------------------------------------------------------------- +# initial conditions +#------------------------------------------------------------------------------- +velocity = nodes1.velocity() +particleRadius = 
nodes1.particleRadius() + +velocity[0] = Vector(0.0,0.0,-vImpact) +particleRadius[0] = radius + +#------------------------------------------------------------------------------- +# Construct a time integrator, and add the physics packages. +#------------------------------------------------------------------------------- +integrator = IntegratorConstructor(db) +for p in packages: + integrator.appendPhysicsPackage(p) +integrator.lastDt = dt +integrator.dtMin = dtMin +integrator.dtMax = dtMax +integrator.dtGrowth = dtGrowth +integrator.domainDecompositionIndependent = domainIndependent +integrator.verbose = dtverbose +integrator.rigorousBoundaries = rigorousBoundaries + +integrator.cullGhostNodes = False + +output("integrator") +output("integrator.havePhysicsPackage(dem)") +output("integrator.lastDt") +output("integrator.dtMin") +output("integrator.dtMax") +output("integrator.dtGrowth") +output("integrator.domainDecompositionIndependent") +output("integrator.rigorousBoundaries") +output("integrator.verbose") + +#------------------------------------------------------------------------------- +# Make the problem controller. +#------------------------------------------------------------------------------- +from SpheralPointmeshSiloDump import dumpPhysicsState +control = SpheralController(integrator, WT, + iterateInitialH = False, + initializeDerivatives = True, + statsStep = statsStep, + restartStep = restartStep, + restartBaseName = restartBaseName, + restoreCycle = restoreCycle, + vizBaseName = vizBaseName, + vizMethod=dumpPhysicsState, + vizDir = vizDir, + vizStep = vizCycle, + vizTime = vizTime) +output("control") + +#------------------------------------------------------------------------------- +# Advance to the end time. +#------------------------------------------------------------------------------- +if not steps is None: + control.step(steps) +else: + control.advance(goalTime, maxSteps) + +#------------------------------------------------------------------------------- +# Great success? 
+#------------------------------------------------------------------------------- +if boolCheckRestitutionCoefficient: +# check our restitution coefficient is correct +#------------------------------------------------------------- + vijPostImpact = -velocity[0].z + vbc + vijPreImpact = vImpact + vbc + restitutionEff = vijPostImpact/vijPreImpact + restitutionError = abs(restitutionEff + normalRestitutionCoefficient)/normalRestitutionCoefficient + if restitutionError > restitutionErrorThreshold: + print(" final velocity = {0}".format(vijPostImpact)) + print(" initial velocity = {0}".format(vijPreImpact)) + raise ValueError(" relative restitution coefficient error, %g, exceeds bounds" % restitutionError) \ No newline at end of file diff --git a/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/pureTorsion-3d.py b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/pureTorsion-3d.py new file mode 100644 index 000000000..0302d1c63 --- /dev/null +++ b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/pureTorsion-3d.py @@ -0,0 +1,356 @@ +#ATS:DEM3dPT = test( SELF, "--clearDirectories True --checkRestitutionCoefficient True --checkNaturalFrequency True", label="DEM pure torsion test -- 3-D (serial)") + +import os, sys, shutil, mpi +from math import * +from Spheral3d import * +from SpheralTestUtilities import * +from findLastRestart import * +from GenerateNodeDistribution3d import * +from GenerateDEMfromSPHGenerator import GenerateDEMfromSPHGenerator3d + +import numpy as np + +if mpi.procs > 1: + from PeanoHilbertDistributeNodes import distributeNodes3d +else: + from DistributeNodes import distributeNodes3d + +title("DEM 3d Pure Torsion Test") + +# This tests the natural freq and restitution coefficient for Zhang et.al. formulation +#------------------------------------------------------------------------------- +# Generic problem parameters +#------------------------------------------------------------------------------- +commandLine(numParticlePerLength = 3, # number of particles on a side of the box + normalSpringConstant=100.0, # spring constant for LDS model + normalRestitutionCoefficient=0.55, # restitution coefficient to get damping const + tangentialSpringConstant=20.857, # spring constant for LDS model + tangentialRestitutionCoefficient=0.55, # restitution coefficient to get damping const + dynamicFriction = 1.0, # static friction coefficient sliding + staticFriction = 1.0, # dynamic friction coefficient sliding + rollingFriction = 1.05, # static friction coefficient for rolling + torsionalFriction = 1.3, # static friction coefficient for torsion + cohesiveTensileStrength = 0.0, # units of pressure + shapeFactor = 0.1, # in [0,1] shape factor from Zhang 2018, 0 - no torsion or rolling + + particleRadius = 1.0, # particle radius + particleDensity = 2.60, + particleVelocity = 0.1, + + neighborSearchBuffer = 0.1, # multiplicative buffer to radius for neighbor search algo + nPerh = 1.01, + + + useSolidBoundary = True, + + # integration + IntegratorConstructor = VerletIntegrator, + stepsPerCollision = 50, + goalTime = 25.0, + dt = 1e-8, + dtMin = 1.0e-8, + dtMax = 0.1, + dtGrowth = 2.0, + steps = None, + maxSteps = None, + statsStep = 10, + domainIndependent = False, + rigorousBoundaries = False, + dtverbose = False, + + # output control + vizCycle = None, + vizTime = 1.0, + clearDirectories = False, + restoreCycle = None, + restartStep = 10000, + redistributeStep = 100000000000000, + dataDir = "dumps-DEM-PureTorsionTest-3d", + + # ats + checkRestitutionCoefficient = 
False, + threshRestitutionCoefficient = 0.05, + checkNaturalFrequency = False, + threshNaturalFrequency = 0.1, + ) + +#------------------------------------------------------------------------------- +# file things +#------------------------------------------------------------------------------- +testName = "DEM-PureTorsionTest-3d" +restartDir = os.path.join(dataDir, "restarts") +vizDir = os.path.join(dataDir, "visit") +restartBaseName = os.path.join(restartDir, testName) +vizBaseName = testName + +if vizCycle is None and vizTime is None: + vizBaseName=None + +#------------------------------------------------------------------------------- +# Check if the necessary output directories exist. If not, create them. +#------------------------------------------------------------------------------- +if mpi.rank == 0: + if clearDirectories and os.path.exists(dataDir): + shutil.rmtree(dataDir) + if not os.path.exists(restartDir): + os.makedirs(restartDir) + if not os.path.exists(vizDir): + os.makedirs(vizDir) +mpi.barrier() + +#------------------------------------------------------------------------------- +# If we're restarting, find the set of most recent restart files. +#------------------------------------------------------------------------------- +if restoreCycle is None: + restoreCycle = findLastRestart(restartBaseName) + + +#------------------------------------------------------------------------------- +# This doesn't really matter kernel filler for neighbor algo +#------------------------------------------------------------------------------- +WT = TableKernel(WendlandC2Kernel(), 1000) + +#------------------------------------------------------------------------------- +# Make the NodeList. +#------------------------------------------------------------------------------- +units = CGuS() +nodes1 = makeDEMNodeList("nodeList1", + neighborSearchBuffer = neighborSearchBuffer, + kernelExtent = WT.kernelExtent) +nodeSet = [nodes1] +for nodes in nodeSet: + output("nodes.name") + output("nodes.hmin") + output("nodes.hmax") + output("nodes.hminratio") + output("nodes.nodesPerSmoothingScale") + +#------------------------------------------------------------------------------- +# Set the node properties. 
+#------------------------------------------------------------------------------- +if restoreCycle is None: + generator0 = GenerateNodeDistribution3d(2, 2, 1, + rho = 1.0, + distributionType = "lattice", + xmin = (0, 0.0, 0), + xmax = ( particleRadius*5,particleRadius*5, particleRadius*5), + nNodePerh = nPerh) + + # transforms particle properties from generator + def DEMParticleGenerator(xi,yi,zi,Hi,mi,Ri): + xout = [xi] + yout = [yi] + zout = [particleRadius] + mout = [particleDensity * 4.0/3.0*np.pi*particleRadius**3] + Rout = [particleRadius] + return xout,yout,zout,mout,Rout + + generator1 = GenerateDEMfromSPHGenerator3d(WT, + generator0, + DEMParticleGenerator=DEMParticleGenerator) + + distributeNodes3d((nodes1, generator1)) + +#------------------------------------------------------------------------------- +# Construct a DataBase to hold our node list +#------------------------------------------------------------------------------- +db = DataBase() +output("db") +for nodes in nodeSet: + db.appendNodeList(nodes) +output("db.numNodeLists") +output("db.numDEMNodeLists") +output("db.numFluidNodeLists") + + +#------------------------------------------------------------------------------- +# PhysicsPackage : DEM +#------------------------------------------------------------------------------- +dem = DEM(db, + normalSpringConstant = normalSpringConstant, + normalRestitutionCoefficient = normalRestitutionCoefficient, + tangentialSpringConstant = tangentialSpringConstant, + tangentialRestitutionCoefficient = tangentialRestitutionCoefficient, + dynamicFrictionCoefficient = dynamicFriction, + staticFrictionCoefficient = staticFriction, + rollingFrictionCoefficient = rollingFriction, + torsionalFrictionCoefficient = torsionalFriction, + cohesiveTensileStrength = cohesiveTensileStrength, + shapeFactor = shapeFactor, + stepsPerCollision = stepsPerCollision) + + +packages = [dem] + + +#------------------------------------------------------------------------------- +# PhysicsPackage : gravity +#------------------------------------------------------------------------------- +gravity = ConstantAcceleration(a0 = Vector(0.0, 0.0, -1.00), + nodeList = nodes1) +packages.append(gravity) +#------------------------------------------------------------------------------- +# Create boundary conditions. +#------------------------------------------------------------------------------- +# implement boundary condition using the DEM packages solid wall feature +if useSolidBoundary: + + solidWall = InfinitePlaneSolidBoundary(Vector(0.0, 0.0, 0.0), Vector( 0.0, 0.0, 1.0)) + dem.appendSolidBoundary(solidWall) + +# implement boundary condition using Spheral's ghost particle reflection +else: + bcs = [ReflectingBoundary(Plane(Vector(0.0, 0.0, 0.0), Vector( 0.0, 0.0, 1.0)))] + for package in packages: + for bc in bcs: + package.appendBoundary(bc) + +#------------------------------------------------------------------------------- +# Fields and Variables +#------------------------------------------------------------------------------- +numNodeLists = db.numNodeLists +nodeLists = db.nodeLists() + +position = db.DEMPosition +mass = db.DEMMass +velocity = db.DEMVelocity +H = db.DEMHfield +radius = db.DEMParticleRadius +compositeParticleIndex = db.DEMCompositeParticleIndex + +uniqueIndex = db.DEMUniqueIndex +omega = dem.omega + +#------------------------------------------------------------------------------- +# Construct a time integrator, and add the physics packages. 
+#------------------------------------------------------------------------------- + +integrator = IntegratorConstructor(db) +for p in packages: + integrator.appendPhysicsPackage(p) +integrator.lastDt = dt +integrator.dtMin = dtMin +integrator.dtMax = dtMax +integrator.dtGrowth = dtGrowth +integrator.domainDecompositionIndependent = domainIndependent +integrator.verbose = dtverbose +integrator.rigorousBoundaries = rigorousBoundaries + +integrator.cullGhostNodes = False + +output("integrator") +output("integrator.havePhysicsPackage(dem)") +output("integrator.lastDt") +output("integrator.dtMin") +output("integrator.dtMax") +output("integrator.dtGrowth") +output("integrator.domainDecompositionIndependent") +output("integrator.rigorousBoundaries") +output("integrator.verbose") + +#------------------------------------------------------------------------------- +# Periodic Work Function: track resitution coefficient +#------------------------------------------------------------------------------- +class OmegaTracker: + def __init__(self): + self.maxOmega = 0.0 + self.period = 0.0 + self.omegan = 0.0 + def periodicWorkFunction(self,cycle,time,dt): + omegai = omega[0][1][2] + if omegai < self.maxOmega: + self.maxOmega = omegai + self.period = time - 15 + self.omegan = pi / self.period +omegaTracker = OmegaTracker() +periodicWork=[(omegaTracker.periodicWorkFunction,1)] + +#------------------------------------------------------------------------------- +# Make the problem controller. +#------------------------------------------------------------------------------- +from SpheralPointmeshSiloDump import dumpPhysicsState +control = SpheralController(integrator, + iterateInitialH = False, + initializeDerivatives = True, + statsStep = statsStep, + restartStep = restartStep, + redistributeStep=redistributeStep, + restartBaseName = restartBaseName, + restoreCycle = restoreCycle, + vizBaseName = vizBaseName, + vizMethod = dumpPhysicsState, + vizGhosts=True, + vizDir = vizDir, + vizStep = vizCycle, + vizTime = vizTime, + periodicWork=periodicWork) +output("control") + +#------------------------------------------------------------------------------- +# Advance to the end time. 
+#------------------------------------------------------------------------------- + +if not steps is None: + control.step(steps) +else: + + # settle dem particles on solid bc in grav field + control.advance(15.0, maxSteps) + + # give 3 particles torsional rotation + for i in range(nodes.numInternalNodes): + if i > 0: + velocity[0][i]=Vector(0,0,0) + omega[0][i]=Vector(0,0,particleVelocity/particleRadius) + + # run to the goal time + control.advance(15.0+goalTime, maxSteps) + +if checkRestitutionCoefficient or checkNaturalFrequency: + + # get the sliding damping constant + beta = pi/log(min(max(tangentialRestitutionCoefficient,1e-4),0.9999)) + nu = 2*tangentialSpringConstant/(1+beta*beta) + mi = particleDensity * 4.0/3.0*np.pi*particleRadius**3 + C = sqrt(2*mi*nu) + + # get the natural frequency and effective resitution coefficient of torsion + omegan = sqrt(5*tangentialSpringConstant*shapeFactor*shapeFactor/mi) + alphan = 5 * C*shapeFactor*shapeFactor/ (2 * mi) + analyticRestitution = exp(-alphan*pi/sqrt(omegan**2-alphan**2)) + + print("") + print("==============================================================") + + if checkNaturalFrequency: + print("") + print(" Checking Torsional Natural Frequency ") + print("") + print(" analytic natural freq : %g" % omegan) + print(" numerical natural freq : %g" % omegaTracker.omegan) + + relativeErrorNaturalFrequency = (abs(omegaTracker.omegan-omegan)/omegan) + + print(" relative error : %g" % relativeErrorNaturalFrequency) + + if relativeErrorNaturalFrequency > threshNaturalFrequency: + raise ValueError(" natural frequency is not within error bounds ") + + if checkRestitutionCoefficient: + + numericalRestitutionCoefficient = (-omegaTracker.maxOmega/particleVelocity*particleRadius) + + print("") + print(" Checking Torsional Restitution Coefficient ") + print("") + print(" analytic restitution coefficient : %g" % analyticRestitution) + print(" numerical restitution coefficient : %g" % numericalRestitutionCoefficient) + + relativeErrorRestitution = (abs(numericalRestitutionCoefficient-analyticRestitution)/analyticRestitution) + + print(" relative error : %g" % relativeErrorRestitution) + + if relativeErrorRestitution > threshRestitutionCoefficient: + raise ValueError(" restitution coefficient is not within error bounds ") + + print("==============================================================") \ No newline at end of file diff --git a/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/rollingParticleTest.py b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/rollingParticleTest.py new file mode 100644 index 000000000..8a58078e1 --- /dev/null +++ b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/rollingParticleTest.py @@ -0,0 +1,302 @@ +#ATS:DEM3dImpact = test( SELF, "--clearDirectories True --checkConservation True --goalTime 1.0", label="DEM impacting squares -- 3-D (parallel)", np=8) + +import os, sys, shutil, mpi, random +from math import * +from Spheral3d import * +from SpheralTestUtilities import * +from findLastRestart import * +from GenerateNodeDistribution3d import * +from GenerateDEMfromSPHGenerator import GenerateDEMfromSPHGenerator3d + +import numpy as np + +sys.path.insert(0, '..') +from DEMConservationTracker import TrackConservation3d as TrackConservation + +if mpi.procs > 1: + from PeanoHilbertDistributeNodes import distributeNodes3d +else: + from DistributeNodes import distributeNodes3d + +title("DEM 3d Drop Test with Particle Generation") +# this tests the ability to generate particle on the fly +# 
diff --git a/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/rollingParticleTest.py b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/rollingParticleTest.py
new file mode 100644
index 000000000..8a58078e1
--- /dev/null
+++ b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/rollingParticleTest.py
@@ -0,0 +1,302 @@
+#ATS:DEM3dImpact = test( SELF, "--clearDirectories True --checkConservation True --goalTime 1.0", label="DEM impacting squares -- 3-D (parallel)", np=8)
+
+import os, sys, shutil, mpi, random
+from math import *
+from Spheral3d import *
+from SpheralTestUtilities import *
+from findLastRestart import *
+from GenerateNodeDistribution3d import *
+from GenerateDEMfromSPHGenerator import GenerateDEMfromSPHGenerator3d
+
+import numpy as np
+
+sys.path.insert(0, '..')
+from DEMConservationTracker import TrackConservation3d as TrackConservation
+
+if mpi.procs > 1:
+    from PeanoHilbertDistributeNodes import distributeNodes3d
+else:
+    from DistributeNodes import distributeNodes3d
+
+title("DEM 3d Rolling Particle Test on a Solid Boundary")
+# this tests particles initialized in pure rolling on an infinite-plane
+# solid boundary condition under a constant gravitational acceleration,
+# with a periodic work function tracking conservation.
+
+#-------------------------------------------------------------------------------
+# Generic problem parameters
+#-------------------------------------------------------------------------------
+commandLine(numParticlePerLength = 3,              # number of particles on a side of the box
+            normalSpringConstant=1.0,              # spring constant for LDS model
+            normalRestitutionCoefficient=0.55,     # restitution coefficient to get damping const
+            tangentialSpringConstant=0.2857,       # spring constant for LDS model
+            tangentialRestitutionCoefficient=0.55, # restitution coefficient to get damping const
+            dynamicFriction = 1.0,                 # dynamic friction coefficient sliding
+            staticFriction = 1.0,                  # static friction coefficient sliding
+            rollingFriction = 1.05,                # static friction coefficient for rolling
+            torsionalFriction = 1.3,               # static friction coefficient for torsion
+            cohesiveTensileStrength = 0.0,         # units of pressure
+            shapeFactor = 0.1,                     # in [0,1] shape factor from Zhang 2018, 0 - no torsion or rolling
+
+            particleRadius = 0.10,                 # particle radius
+            particleDensity = 2.60,
+            particleVelocity = 0.1,
+
+            neighborSearchBuffer = 0.1,            # multiplicative buffer to radius for neighbor search algo
+            nPerh = 1.01,
+
+            # integration
+            IntegratorConstructor = VerletIntegrator,
+            stepsPerCollision = 50,                # replaces CFL for DEM
+            goalTime = 25.0,
+            dt = 1e-8,
+            dtMin = 1.0e-8,
+            dtMax = 0.1,
+            dtGrowth = 2.0,
+            steps = None,
+            maxSteps = None,
+            statsStep = 10,
+            domainIndependent = False,
+            rigorousBoundaries = False,
+            dtverbose = False,
+
+            # output control
+            vizCycle = None,
+            vizTime = 1.0,
+            clearDirectories = False,
+            restoreCycle = None,
+            restartStep = 10000,
+            redistributeStep = 100000000000000,
+            dataDir = "dumps-DEM-impactingSquares-3d",
+
+            # ats
+            checkRestart = False,
+            checkConservation = False,             # turn on error checking for momentum conservation
+            conservationErrorThreshold = 2e-14,    # relative error for momentum conservation
+            )
+
+#-------------------------------------------------------------------------------
+# file things
+#-------------------------------------------------------------------------------
+testName = "DEM-ImpactingSquares-3d"
+restartDir = os.path.join(dataDir, "restarts")
+vizDir = os.path.join(dataDir, "visit")
+restartBaseName = os.path.join(restartDir, testName)
+vizBaseName = testName
+
+if vizCycle is None and vizTime is None:
+    vizBaseName=None
+
+#-------------------------------------------------------------------------------
+# Check if the necessary output directories exist. If not, create them.
+#-------------------------------------------------------------------------------
+if mpi.rank == 0:
+    if clearDirectories and os.path.exists(dataDir):
+        shutil.rmtree(dataDir)
+    if not os.path.exists(restartDir):
+        os.makedirs(restartDir)
+    if not os.path.exists(vizDir):
+        os.makedirs(vizDir)
+mpi.barrier()
+
+#-------------------------------------------------------------------------------
+# If we're restarting, find the set of most recent restart files.
+#-------------------------------------------------------------------------------
+if restoreCycle is None:
+    restoreCycle = findLastRestart(restartBaseName)
+
+
+#-------------------------------------------------------------------------------
+# Interpolation kernel: the choice doesn't really matter here, it is only a
+# filler for the neighbor search algorithm.
+#-------------------------------------------------------------------------------
+WT = TableKernel(WendlandC2Kernel(), 1000)
+
+#-------------------------------------------------------------------------------
+# Make the NodeList.
+#-------------------------------------------------------------------------------
+units = CGuS()
+nodes1 = makeDEMNodeList("nodeList1",
+                         neighborSearchBuffer = neighborSearchBuffer,
+                         kernelExtent = WT.kernelExtent)
+nodeSet = [nodes1]
+for nodes in nodeSet:
+    output("nodes.name")
+    output("nodes.hmin")
+    output("nodes.hmax")
+    output("nodes.hminratio")
+    output("nodes.nodesPerSmoothingScale")
+
+#-------------------------------------------------------------------------------
+# Set the node properties.
+#-------------------------------------------------------------------------------
+if restoreCycle is None:
+    generator0 = GenerateNodeDistribution3d(2, 2, 1,
+                                            rho = 1.0,
+                                            distributionType = "lattice",
+                                            xmin = (0, 0.0, 1.0),
+                                            xmax = ( 1.0,1.0, 2.0),
+                                            nNodePerh = nPerh)
+
+    # simple generator: one spherical particle per seed point, resting on the boundary
+    def DEMParticleGenerator(xi,yi,zi,Hi,mi,Ri):
+        xout = [xi]
+        yout = [yi]
+        zout = [particleRadius]
+        mout = [particleDensity * 4.0/3.0*np.pi*particleRadius**3]
+        Rout = [particleRadius]
+        return xout,yout,zout,mout,Rout
+
+    generator1 = GenerateDEMfromSPHGenerator3d(WT,
+                                               generator0,
+                                               DEMParticleGenerator=DEMParticleGenerator)
+
+    distributeNodes3d((nodes1, generator1))
+
+#-------------------------------------------------------------------------------
+# Construct a DataBase to hold our node list
+#-------------------------------------------------------------------------------
+db = DataBase()
+output("db")
+for nodes in nodeSet:
+    db.appendNodeList(nodes)
+output("db.numNodeLists")
+output("db.numDEMNodeLists")
+output("db.numFluidNodeLists")
+
+
+#-------------------------------------------------------------------------------
+# PhysicsPackage : DEM
+#-------------------------------------------------------------------------------
+dem = DEM(db,
+          normalSpringConstant = normalSpringConstant,
+          normalRestitutionCoefficient = normalRestitutionCoefficient,
+          tangentialSpringConstant = tangentialSpringConstant,
+          tangentialRestitutionCoefficient = tangentialRestitutionCoefficient,
+          dynamicFrictionCoefficient = dynamicFriction,
+          staticFrictionCoefficient = staticFriction,
+          rollingFrictionCoefficient = rollingFriction,
+          torsionalFrictionCoefficient = torsionalFriction,
+          cohesiveTensileStrength = cohesiveTensileStrength,
+          shapeFactor = shapeFactor,
+          stepsPerCollision = stepsPerCollision)
+
+packages = [dem]
+
+solidWall = InfinitePlaneSolidBoundary(Vector(0.0, 0.0, 0.0), Vector( 0.0, 0.0, 1.0))
+#solidWall2 = CylinderSolidBoundary(Vector(0.0, 0.0, -10.0),Vector(0.0, 0.0,1.0),5.0,15.0)
+
+dem.appendSolidBoundary(solidWall)
+#dem.appendSolidBoundary(solidWall2)
+# #-------------------------------------------------------------------------------
+# # PhysicsPackage : gravity
+# #-------------------------------------------------------------------------------
+gravity = ConstantAcceleration(a0 = Vector(0.0, 0.0, -1.00),
+                               nodeList = nodes1)
+packages += [gravity]
+
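A note on the DEMParticleGenerator hook used in the node setup above: the callback receives one seed point from the SPH-style generator (position, H, mass, radius) and returns parallel lists, so a single seed point may spawn any number of DEM particles. The following is a hypothetical illustration of that contract (the function name, the two-particle layout, and the locally redefined constants are assumptions for the sketch, not part of this test):

import numpy as np

particleRadius = 0.10    # assumed here; mirrors the commandLine default above
particleDensity = 2.60

def twoStackedParticles(xi, yi, zi, Hi, mi, Ri):
    # emit two equal spheres per seed point: one on the wall, one resting on top of it
    mp = particleDensity * 4.0/3.0*np.pi*particleRadius**3
    xout = [xi, xi]
    yout = [yi, yi]
    zout = [particleRadius, 3.0*particleRadius]
    mout = [mp, mp]
    Rout = [particleRadius, particleRadius]
    return xout, yout, zout, mout, Rout

# it would be handed to the generator the same way as above, e.g.
# GenerateDEMfromSPHGenerator3d(WT, generator0, DEMParticleGenerator=twoStackedParticles)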
+# #-------------------------------------------------------------------------------
+# # Create boundary conditions.
+# #-------------------------------------------------------------------------------
+# plane1 = Plane(Vector(0.0, 0.0, 0.0), Vector( 0.0, 0.0, 1.0))
+# bc1 = ReflectingBoundary(plane1)
+# bcSet = [bc1]
+
+# for p in packages:
+#     for bc in bcSet:
+#         p.appendBoundary(bc)
+
+#-------------------------------------------------------------------------------
+# Fields and Variables
+#-------------------------------------------------------------------------------
+numNodeLists = db.numNodeLists
+nodeLists = db.nodeLists()
+
+position = db.DEMPosition
+mass = db.DEMMass
+velocity = db.DEMVelocity
+H = db.DEMHfield
+radius = db.DEMParticleRadius
+compositeParticleIndex = db.DEMCompositeParticleIndex
+
+uniqueIndex = db.DEMUniqueIndex
+omega = dem.omega
+
+#-------------------------------------------------------------------------------
+# Initial Conditions: start the particles in pure rolling
+#-------------------------------------------------------------------------------
+if mpi.rank == 0 :
+    for i in range(nodes.numInternalNodes):
+        if i > 0:
+            velocity[0][i]=Vector(particleVelocity,0,0)
+            omega[0][i]=Vector(0,particleVelocity/particleRadius,0)
+
+#-------------------------------------------------------------------------------
+# Construct a time integrator, and add the physics packages.
+#-------------------------------------------------------------------------------
+
+integrator = IntegratorConstructor(db)
+for p in packages:
+    integrator.appendPhysicsPackage(p)
+integrator.lastDt = dt
+integrator.dtMin = dtMin
+integrator.dtMax = dtMax
+integrator.dtGrowth = dtGrowth
+integrator.domainDecompositionIndependent = domainIndependent
+integrator.verbose = dtverbose
+integrator.rigorousBoundaries = rigorousBoundaries
+
+integrator.cullGhostNodes = False
+
+output("integrator")
+output("integrator.havePhysicsPackage(dem)")
+output("integrator.lastDt")
+output("integrator.dtMin")
+output("integrator.dtMax")
+output("integrator.dtGrowth")
+output("integrator.domainDecompositionIndependent")
+output("integrator.rigorousBoundaries")
+output("integrator.verbose")
+
+#-------------------------------------------------------------------------------
+# Periodic Work Function: Track conservation
+#-------------------------------------------------------------------------------
+
+conservation = TrackConservation(db,
+                                 dem,
+                                 verbose=True)
+
+periodicWork = [(conservation.periodicWorkFunction,1)]
+
+
+#-------------------------------------------------------------------------------
+# Make the problem controller.
+#-------------------------------------------------------------------------------
+from SpheralPointmeshSiloDump import dumpPhysicsState
+control = SpheralController(integrator,
+                            iterateInitialH = False,
+                            initializeDerivatives = True,
+                            statsStep = statsStep,
+                            restartStep = restartStep,
+                            redistributeStep=redistributeStep,
+                            restartBaseName = restartBaseName,
+                            restoreCycle = restoreCycle,
+                            vizBaseName = vizBaseName,
+                            vizMethod = dumpPhysicsState,
+                            vizGhosts=True,
+                            vizDir = vizDir,
+                            vizStep = vizCycle,
+                            vizTime = vizTime,
+                            periodicWork=periodicWork)
+output("control")
+
+#control.redistribute = PeanoHilbertOrderRedistributeNodes(db.maxKernelExtent,workBalance=False)
+#-------------------------------------------------------------------------------
+# Advance to the end time.
+#------------------------------------------------------------------------------- + +if not steps is None: + control.step(steps) +else: + control.advance(goalTime, maxSteps) + diff --git a/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/singleParticleBoundaryCollision-3d.py b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/singleParticleBoundaryCollision-3d.py index f02deb3cb..c2e04c9ff 100644 --- a/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/singleParticleBoundaryCollision-3d.py +++ b/tests/functional/DEM/LinearSpringDEM/SolidBoundaryCondition/singleParticleBoundaryCollision-3d.py @@ -301,16 +301,16 @@ if boolCheckSlidingFrictionX: if omega[0][0].magnitude() > omega0: raise ValueError("particles are rotating faster post-collision") - if omega[0][0].y > omegaThreshold or omega[0][0].z > omegaThreshold: + if abs(omega[0][0].x) > omegaThreshold or abs(omega[0][0].z) > omegaThreshold: raise ValueError("erroneous spin-up in perpendicular direction") if boolCheckSlidingFrictionY: if omega[0][0].magnitude() > omega0: raise ValueError("particles are rotating faster post-collision") - if omega[0][0].x > omegaThreshold or omega[0][0].z > omegaThreshold: + if abs(omega[0][0].y) > omegaThreshold or abs(omega[0][0].z) > omegaThreshold: raise ValueError("erroneous spin-up in perpendicular direction") if boolCheckTorsionalFriction: if omega[0][0].magnitude() > omega0: raise ValueError("particles are rotating faster post-collision") - if omega[0][0].x > omegaThreshold or omega[0][0].y > omegaThreshold: + if abs(omega[0][0].x) > omegaThreshold or abs(omega[0][0].y) > omegaThreshold: raise ValueError("erroneous spin-up in perpendicular direction") diff --git a/tests/functional/DEM/LinearSpringDEM/TwoParticleCollision/twoParticleCollision-3d.py b/tests/functional/DEM/LinearSpringDEM/TwoParticleCollision/twoParticleCollision-3d.py index 9030f7422..27192d7dd 100644 --- a/tests/functional/DEM/LinearSpringDEM/TwoParticleCollision/twoParticleCollision-3d.py +++ b/tests/functional/DEM/LinearSpringDEM/TwoParticleCollision/twoParticleCollision-3d.py @@ -77,6 +77,7 @@ checkConservation = False, # turn on error checking for momentum conservation restitutionErrorThreshold = 0.02, # relative error actual restitution vs nominal conservationErrorThreshold = 1e-15, # relative error for momentum conservation + omegaThreshold = 1e-14, # tolerance for erroneous spin up in inactive directions torsionalObjectivityThreshold = 1e-10 # relative error bounds on torsion objectivity test ) @@ -342,15 +343,33 @@ if conservation.deltaRotationalMomentumZ() > conservationErrorThreshold: raise ValueError("rotational momentum -z conservation error, %g, exceeds bounds" % conservation.deltaRotationalMomentumZ()) -if boolCheckSlidingFriction or boolCheckRollingFriction or boolCheckTorsionalFriction: +if boolCheckSlidingFriction or boolCheckRollingFriction: # check for non-physical behavior #------------------------------------------------------------- if omega[0][0].magnitude()+omega[0][1].magnitude() > 2*omega0: raise ValueError("particles are rotating faster post-collision") - + if abs(omega[0][0].x) > omegaThreshold or abs(omega[0][0].y) > omegaThreshold: + raise ValueError("erroneous spin-up of particle 0 in perpendicular direction") + if abs(omega[0][1].x) > omegaThreshold or abs(omega[0][1].y) > omegaThreshold: + raise ValueError("erroneous spin-up of particle 1 in perpendicular direction") + +if boolCheckTorsionalFriction: +# check for non-physical behavior 
+#------------------------------------------------------------- + if omega[0][0].magnitude()+omega[0][1].magnitude() > 2*omega0: + raise ValueError("particles are rotating faster post-collision") + if abs(omega[0][0].z) > omegaThreshold or abs(omega[0][0].y) > omegaThreshold: + raise ValueError("erroneous spin-up of particle 0 in perpendicular direction") + if abs(omega[0][1].z) > omegaThreshold or abs(omega[0][1].y) > omegaThreshold: + raise ValueError("erroneous spin-up of particle 1 in perpendicular direction") + if boolCheckTorsionalObjectivity: # to satify objectivity omega (along axis) should not change when equal #------------------------------------------------------------- omegaError = (2*omega0 - omega[0][0][0] - omega[0][1][0]) / (2*omega0) if omegaError > torsionalObjectivityThreshold: raise ValueError("torsional objectivity failure with relative angular velocity error, %g, exceeds bounds" % omegaError) + if abs(omega[0][0].z) > omegaThreshold or abs(omega[0][0].y) > omegaThreshold: + raise ValueError("erroneous spin-up of particle 0 in perpendicular direction") + if abs(omega[0][1].z) > omegaThreshold or abs(omega[0][1].y) > omegaThreshold: + raise ValueError("erroneous spin-up of particle 1 in perpendicular direction") \ No newline at end of file diff --git a/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d.py b/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d.py index f1e064d36..51d7ae2ed 100644 --- a/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d.py +++ b/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d.py @@ -8,7 +8,7 @@ from math import * from Spheral2d import * from SpheralTestUtilities import * -from SpheralGnuPlotUtilities import * +#from SpheralGnuPlotUtilities import * from findLastRestart import * from GenerateNodeDistribution2d import * from CompositeNodeDistribution import * @@ -28,9 +28,9 @@ # Generic problem parameters #------------------------------------------------------------------------------- commandLine(nx1 = 100, - ny1 = 50, + ny1 = 50, nx2 = 100, - ny2 = 50, + ny2 = 50, rho1 = 2.0, rho2 = 1.0, @@ -51,9 +51,9 @@ # kernel HUpdate = IdealH, - nPerh = 1.51, - KernelConstructor = BSplineKernel, - order = 5, + nPerh = 3.0, + KernelConstructor = WendlandC2Kernel, + order = 3, hmin = 0.0001, hmax = 0.5, hminratio = 0.1, @@ -64,6 +64,8 @@ crksph = False, fsisph = False, gsph = False, + mfm = False, + mfv = False, # hydro options solid = False, # fluid limit of the solid hydro @@ -78,6 +80,7 @@ evolveTotalEnergy = False, # integrate total instead of specific energy gradhCorrection = True, # correct for temporal variation in h correctVelocityGradient = True, # linear exact velocity gradient (M correction) (corrected kernesl for GSPH and FSISPH) + # SVPH parameters fcentroidal = 0.0, fcellPressure = 0.0, @@ -90,14 +93,17 @@ fsiInterfaceMethod = HLLCInterface, # (HLLCInterface, ModulusInterface) fsiKernelMethod = NeverAverageKernels, # (NeverAverageKernels, AlwaysAverageKernels, AverageInterfaceKernels) - # GSPH parameters + # GSPH/MFM/MFV parameters gsphEpsDiffuseCoeff = 0.0, gsphLinearCorrect = True, + LimiterConstructor = VanLeerLimiter, + WaveSpeedConstructor = DavisWaveSpeed, + nodeMotionCoefficient = 0.2, # artificial viscosity + Qconstructor = LimitedMonaghanGingoldViscosity, Cl = 1.0, Cq = 1.0, - Qconstructor = MonaghanGingoldViscosity, linearConsistent = False, boolReduceViscosity = False, nh = 5.0, @@ -114,7 +120,7 @@ arCondAlpha = 0.5, # integrator - cfl = 0.5, + cfl = 0.25, IntegratorConstructor = 
CheapSynchronousRK2Integrator, goalTime = 2.0, steps = None, @@ -150,21 +156,32 @@ assert not svph assert not (compatibleEnergy and evolveTotalEnergy) -assert sum([fsisph,psph,gsph,crksph,svph])<=1 +assert sum([fsisph,psph,gsph,crksph,svph,mfm])<=1 assert not (fsisph and not solid) +assert not ((mfm or gsph or mfv) and ( boolReduceViscosity)) # Decide on our hydro algorithm. hydroname = 'SPH' +useArtificialViscosity=True + if svph: hydroname = "SVPH" elif crksph: hydroname = "CRK"+hydroname + Qconstructor = LimitedMonaghanGingoldViscosity elif psph: hydroname = "P"+hydroname elif fsisph: hydroname = "FSI"+hydroname elif gsph: hydroname = "G"+hydroname + useArtificialViscosity=False +elif mfm: + hydroname = "MFM" + useArtificialViscosity=False +elif mfv: + hydroname = "MFV" + useArtificialViscosity=False if asph: hydorname = "A"+hydroname if solid: @@ -186,12 +203,6 @@ restartBaseName = os.path.join(restartDir, "KelvinHelmholtz-2d") vizBaseName = "KelvinHelmholtz-2d" -#------------------------------------------------------------------------------- -# CRKSPH Switches to ensure consistency -#------------------------------------------------------------------------------- -if crksph or fsisph: - Qconstructor = LimitedMonaghanGingoldViscosity - #------------------------------------------------------------------------------- # Check if the necessary output directories exist. If not, create them. #------------------------------------------------------------------------------- @@ -218,14 +229,12 @@ #------------------------------------------------------------------------------- # Interpolation kernels. #------------------------------------------------------------------------------- -if KernelConstructor=="NBSplineKernel": +if KernelConstructor == NBSplineKernel: WT = TableKernel(NBSplineKernel(order), 1000) - WTPi = TableKernel(NBSplineKernel(order), 1000, Qhmult) else: WT = TableKernel(KernelConstructor(), 1000) - WTPi = TableKernel(KernelConstructor(), 1000, Qhmult) output("WT") -output("WTPi") + kernelExtent = WT.kernelExtent #------------------------------------------------------------------------------- @@ -332,7 +341,7 @@ def vy(ri): #------------------------------------------------------------------------------- # Construct the artificial viscosity. 
#------------------------------------------------------------------------------- -if not gsph: +if useArtificialViscosity: q = Qconstructor(Cl, Cq, linearInExpansion) q.epsilon2 = epsilon2 q.limiter = Qlimiter @@ -351,6 +360,7 @@ def vy(ri): #------------------------------------------------------------------------------- if crksph: hydro = CRKSPH(dataBase = db, + Q=q, cfl = cfl, filter = filter, epsTensile = epsilonTensile, @@ -391,18 +401,54 @@ def vy(ri): ASPH = asph, epsTensile = epsilonTensile) elif gsph: - limiter = VanLeerLimiter() - waveSpeed = DavisWaveSpeed() + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) hydro = GSPH(dataBase = db, riemannSolver = solver, W = WT, cfl=cfl, - specificThermalEnergyDiffusionCoefficient = gsphEpsDiffuseCoeff, compatibleEnergyEvolution = compatibleEnergy, correctVelocityGradient= correctVelocityGradient, evolveTotalEnergy = evolveTotalEnergy, densityUpdate=densityUpdate, + gradientType = SPHSameTimeGradient, + XSPH = xsph, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) +elif mfm: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = MFM(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + gradientType = SPHSameTimeGradient, + densityUpdate=densityUpdate, + XSPH = xsph, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) +elif mfv: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = MFV(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + nodeMotionCoefficient = nodeMotionCoefficient, + nodeMotionType = NodeMotionType.Lagrangian, + gradientType = SPHSameTimeGradient, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate=densityUpdate, XSPH = xsph, ASPH = asph, epsTensile = epsilonTensile, @@ -433,7 +479,7 @@ def vy(ri): #------------------------------------------------------------------------------- # Construct the MMRV physics object. 
#------------------------------------------------------------------------------- -if boolReduceViscosity: +if boolReduceViscosity and useArtificialViscosity: evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax) packages.append(evolveReducingViscosityMultiplier) @@ -494,6 +540,7 @@ def vy(ri): # import SpheralPointmeshSiloDump # vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState control = SpheralController(integrator, WT, + initializeDerivatives=True, statsStep = statsStep, restartStep = restartStep, restartBaseName = restartBaseName, diff --git a/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d_McNally.py b/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d_McNally.py index 05feab1b9..b0f2ce737 100644 --- a/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d_McNally.py +++ b/tests/functional/Hydro/KelvinHelmholtz/KelvinHelmholtz-2d_McNally.py @@ -64,12 +64,13 @@ psph = False, fsisph = False, gsph = False, + mfm = False, # hydro options solid = False, asph = False, # Just for choosing the H algorithm - useVelocityMagnitudeForDt = False, XSPH = False, + useVelocityMagnitudeForDt = False, epsilonTensile = 0.0, nTensile = 8, filter = 0.0, @@ -80,6 +81,8 @@ evolveTotalEnergy = False, # artificial viscosity + Cl = None, + Cq = None, linearConsistent = False, boolReduceViscosity = False, nh = 5.0, @@ -94,8 +97,6 @@ betaE = 1.0, fKern = 1.0/3.0, boolHopkinsCorrection = True, - Cl = None, - Cq = None, linearInExpansion = False, Qlimiter = None, balsaraCorrection = False, @@ -120,6 +121,8 @@ # GSPH parameters gsphEpsDiffuseCoeff = 0.0, gsphLinearCorrect = True, + LimiterConstructor = VanLeerLimiter, + WaveSpeedConstructor = DavisWaveSpeed, ## integrator cfl = 0.5, @@ -164,10 +167,12 @@ assert numNodeLists in (1, 2) assert not svph assert not (compatibleEnergy and evolveTotalEnergy) -assert sum([fsisph,psph,gsph,crksph,svph])<=1 +assert sum([fsisph,psph,gsph,crksph,svph,mfm])<=1 assert not (fsisph and not solid) +assert not ((mfm or gsph) and (boolCullenViscosity or boolReduceViscosity)) # hydro algorithm label +useArtificialViscosity = True if svph: hydroname = "SVPH" elif crksph: @@ -180,11 +185,19 @@ hydroname = "FSISPH" elif gsph: hydroname = "GSPH" + useArtificialViscosity=False +elif mfm: + hydroname = "MFM" + useArtificialViscosity=False else: hydroname = "SPH" + if asph: hydroname = "A" + hydroname +if solid: + hydroname = "solid" + hydroname + dataDir = os.path.join(dataDir, "rho1=%g-rho2=%g" % (rho1, rho2), "vx1=%g-vx2=%g" % (abs(vx1), abs(vx2)), @@ -429,14 +442,29 @@ ASPH = asph, epsTensile = epsilonTensile) elif gsph: - limiter = VanLeerLimiter() - waveSpeed = DavisWaveSpeed() + limiter = LimiterConstructor + waveSpeed = WaveSpeedConstructor + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = GSPH(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate=densityUpdate, + XSPH = XSPH, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) +elif mfm: + limiter = LimiterConstructor + waveSpeed = WaveSpeedConstructor solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) hydro = GSPH(dataBase = db, riemannSolver = solver, W = WT, cfl=cfl, - specificThermalEnergyDiffusionCoefficient = gsphEpsDiffuseCoeff, compatibleEnergyEvolution = compatibleEnergy, correctVelocityGradient= correctVelocityGradient, evolveTotalEnergy = evolveTotalEnergy, @@ -471,7 +499,7 @@ 
#------------------------------------------------------------------------------- # Set the artificial viscosity parameters. #------------------------------------------------------------------------------- -if not gsph: +if useArtificialViscosity: q = hydro.Q if not Cl is None: q.Cl = Cl @@ -498,10 +526,10 @@ #------------------------------------------------------------------------------- # Construct the MMRV physics object. #------------------------------------------------------------------------------- -if boolReduceViscosity: +if boolReduceViscosity and useArtificialViscosity: evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax) packages.append(evolveReducingViscosityMultiplier) -elif boolCullenViscosity: +elif boolCullenViscosity and useArtificialViscosity: evolveCullenViscosityMultiplier = CullenDehnenViscosity(q,WT,alphMax,alphMin,betaC,betaD,betaE,fKern,boolHopkinsCorrection) packages.append(evolveCullenViscosityMultiplier) diff --git a/tests/functional/Hydro/Noh/Noh-cylindrical-2d.py b/tests/functional/Hydro/Noh/Noh-cylindrical-2d.py index 636f4332d..8be099298 100644 --- a/tests/functional/Hydro/Noh/Noh-cylindrical-2d.py +++ b/tests/functional/Hydro/Noh/Noh-cylindrical-2d.py @@ -4,6 +4,11 @@ #ATS:sph0 = test( SELF, "--crksph False --nRadial 100 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --clearDirectories True --steps 100", label="Noh cylindrical SPH, nPerh=2.0", np=8) #ATS:sph1 = testif(sph0, SELF, "--crksph False --nRadial 100 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --clearDirectories False --steps 60 --restoreCycle 40 --checkRestart True", label="Noh cylindrical SPH, nPerh=2.0, restart test", np=8) # +# ASPH +# +#ATS:asph0 = test( SELF, "--crksph False --asph True --nRadial 100 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --clearDirectories True --steps 100", label="Noh cylindrical ASPH, nPerh=2.0", np=8) +#ATS:asph1 = testif(sph0, SELF, "--crksph False --asph True --nRadial 100 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --clearDirectories False --steps 60 --restoreCycle 40 --checkRestart True", label="Noh cylindrical ASPH, nPerh=2.0, restart test", np=8) +# # CRK (SumVolume) # #ATS:crk0 = test( SELF, "--crksph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKSumVolume --clearDirectories True --steps 50", label="Noh cylindrical CRK (sum vol), nPerh=2.0", np=2) @@ -14,6 +19,16 @@ #ATS:crk2 = test( SELF, "--crksph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKVoronoiVolume --clearDirectories True --steps 50", label="Noh cylindrical CRK (Voronoi vol), nPerh=2.0", np=2) #ATS:crk3 = testif(crk2, SELF, "--crksph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKVoronoiVolume --clearDirectories False --steps 10 --restoreCycle 40 --checkRestart True", label="Noh cylindrical CRK (Voronoi vol) , nPerh=2.0, restart test", np=2) # +# ACRK (SumVolume) +# +#ATS:acrk0 = test( SELF, "--crksph True --asph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKSumVolume --clearDirectories True --steps 50", label="Noh cylindrical ACRK (sum vol), nPerh=2.0", np=2) +#ATS:acrk1 = testif(acrk0, SELF, 
"--crksph True --asph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKSumVolume --clearDirectories False --steps 10 --restoreCycle 40 --checkRestart True", label="Noh cylindrical ACRK (sum vol), nPerh=2.0, restart test", np=2) +# +# ACRK (VoroniVolume) +# +#ATS:acrk2 = test( SELF, "--crksph True --asph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKVoronoiVolume --clearDirectories True --steps 50", label="Noh cylindrical ACRK (Voronoi vol), nPerh=2.0", np=2) +#ATS:acrk3 = testif(acrk2, SELF, "--crksph True --asph True --nRadial 20 --cfl 0.25 --Cl 1.0 --Cq 1.0 --filter 0.0 --nPerh 2.01 --graphics False --restartStep 20 --volumeType RKVoronoiVolume --clearDirectories False --steps 10 --restoreCycle 40 --checkRestart True", label="Noh cylindrical ACRK (Voronoi vol) , nPerh=2.0, restart test", np=2) +# # GSPH # #ATS:gsph0 = test( SELF, "--gsph True --nRadial 100 --cfl 0.25 --nPerh 2.01 --graphics False --restartStep 20 --clearDirectories True --steps 100", label="Noh cylindrical GSPH, nPerh=2.0", np=8) @@ -25,24 +40,27 @@ # # W.F. Noh 1987, JCP, 72, 78-120. #------------------------------------------------------------------------------- -import os, shutil +import os, shutil, mpi, sys from math import * + from SolidSpheral2d import * from SpheralTestUtilities import * from GenerateNodeDistribution2d import * from CubicNodeGenerator import GenerateSquareNodeDistribution from CentroidalVoronoiRelaxation import * -import mpi -import DistributeNodes +if mpi.procs > 1: + from VoronoiDistributeNodes import distributeNodes2d + #from PeanoHilbertDistributeNodes import distributeNodes2d +else: + from DistributeNodes import distributeNodes2d title("2-D integrated hydro test -- cylindrical Noh problem") #------------------------------------------------------------------------------- # Generic problem parameters #------------------------------------------------------------------------------- -commandLine(order = 5, - seed = "constantDTheta", +commandLine(seed = "constantDTheta", thetaFactor = 0.5, azimuthalOffsetFraction = 0.0, @@ -50,7 +68,6 @@ nTheta = 50, rmin = 0.0, rmax = 1.0, - nPerh = 2.01, rho0 = 1.0, eps0 = 0.0, smallPressure = False, @@ -60,22 +77,53 @@ gamma = 5.0/3.0, mu = 1.0, + # hydro type (only one!) + svph = False, + crksph = False, # high order conservative formulation of SPH + psph = False, # pressure-based formulation of SPH + fsisph = False, # formulation for multimaterial problems + gsph = False, # godunov SPH + mfm = False, # moving finite mass of Hopkins 2015 + mfv=False, # moving finite volume of Hopkins 2015 + asph = False, # This just chooses the H algorithm -- you can use this with CRKSPH for instance. solid = False, # If true, use the fluid limit of the solid hydro option - svph = False, - crksph = False, - psph = False, - fsisph = False, - gsph = False, - mfm = False, - asph = False, # This just chooses the H algorithm -- you can use this with CRKSPH for instance. 
- boolReduceViscosity = False, + # general hydro options + densityUpdate = RigorousSumDensity, # (IntegrateDensity) + evolveTotalEnergy = False, # evolve total rather than specific energy + compatibleEnergy = True, # evolve specific in a energy conserving manner + gradhCorrection = True, # only for SPH, PSPH (correction for time evolution of h) + correctVelocityGradient = True, # linear gradient correction + XSPH = False, # monaghan's xsph -- move w/ averaged velocity + epsilonTensile = 0.0, # coefficient for the tensile correction + nTensile = 8, # exponent for tensile correction + filter = 0.0, + + # PSPH options HopkinsConductivity = False, # For PSPH + + #CRKSPH options + correctionOrder = LinearOrder, + volumeType = RKSumVolume, + + # MFV + nodeMotion = NodeMotionType.Lagrangian, + + # artificial viscosity + Cl = None, + Cq = None, + linearInExpansion = None, + Qlimiter = None, + balsaraCorrection = None, + epsilon2 = None, + + boolReduceViscosity = False, # morris-monaghan reducing AV nhQ = 5.0, nhL = 10.0, aMin = 0.1, aMax = 2.0, - boolCullenViscosity = False, + + boolCullenViscosity = False, # cullen dehnen AV limiter alphMax = 2.0, alphMin = 0.02, betaC = 0.7, @@ -85,22 +133,18 @@ boolHopkinsCorrection = True, linearConsistent = False, - Cl = None, - Cq = None, - linearInExpansion = None, - Qlimiter = None, - balsaraCorrection = None, - epsilon2 = None, + # kernel options + KernelConstructor = NBSplineKernel, #(NBSplineKernel,WendlandC2Kernel,WendlandC4Kernel,WendlandC6Kernel) + nPerh = 2.01, + HUpdate = IdealH, + order = 5, hmin = 0.0001, hmax = 0.5, hminratio = 0.1, - cfl = 0.25, - XSPH = False, - epsilonTensile = 0.0, - nTensile = 8, - filter = 0.0, + # integrator options IntegratorConstructor = CheapSynchronousRK2Integrator, + cfl = 0.25, goalTime = 0.6, steps = None, vizCycle = None, @@ -112,19 +156,11 @@ maxSteps = None, statsStep = 10, smoothIters = 0, - HUpdate = IdealH, - correctionOrder = LinearOrder, - volumeType = RKSumVolume, domainIndependent = False, rigorousBoundaries = False, dtverbose = False, - densityUpdate = RigorousSumDensity, # VolumeScaledDensity, - evolveTotalEnergy = False, # Only for SPH variants -- evolve total rather than specific energy - compatibleEnergy = True, - gradhCorrection = True, - correctVelocityGradient = True, - + # output options useVoronoiOutput = True, clearDirectories = False, vizDerivs = False, @@ -141,6 +177,7 @@ assert not(boolReduceViscosity and boolCullenViscosity) assert not((gsph or mfm) and (boolReduceViscosity or boolCullenViscosity)) assert not(fsisph and not solid) +assert sum([crksph,psph,fsisph,svph,gsph,mfm,mfv])<=1 assert thetaFactor in (0.5, 1.0, 2.0) theta = thetaFactor * pi @@ -163,10 +200,14 @@ hydroname = os.path.join("CRKSPH", str(correctionOrder), str(volumeType)) +elif fsisph: + hydroname = "FSISPH" elif gsph: hydroname = "GSPH" elif mfm: hydroname = "MFM" +elif mfv: + hydroname = "MFV" elif psph: hydroname = "PSPH" else: @@ -183,6 +224,7 @@ "compatibleEnergy=%s" % compatibleEnergy, "Cullen=%s" % boolCullenViscosity, "filter=%f" % filter, + "%s" % nodeMotion, "nrad=%i_ntheta=%i" % (nRadial, nTheta)) restartDir = os.path.join(dataDir, "restarts") restartBaseName = os.path.join(restartDir, "Noh-cylindrical-2d-%ix%i" % (nRadial, nTheta)) @@ -196,7 +238,6 @@ #------------------------------------------------------------------------------- # Check if the necessary output directories exist. If not, create them. 
#------------------------------------------------------------------------------- -import os, sys if mpi.rank == 0: if clearDirectories and os.path.exists(dataDir): shutil.rmtree(dataDir) @@ -214,7 +255,10 @@ #------------------------------------------------------------------------------- # Interpolation kernels. #------------------------------------------------------------------------------- -WT = TableKernel(NBSplineKernel(order), 1000) +if KernelConstructor==NBSplineKernel: + WT = TableKernel(KernelConstructor(order), 1000) +else: + WT = TableKernel(KernelConstructor(), 1000) output("WT") kernelExtent = WT.kernelExtent @@ -250,8 +294,8 @@ generator = GenerateSquareNodeDistribution(nRadial, nTheta, rho0, - xmin, - xmax, + xmin=xmin, + xmax=xmax, nNodePerh = nPerh, SPH = not asph) else: @@ -265,12 +309,6 @@ nNodePerh = nPerh, SPH = not asph) -if mpi.procs > 1: - from VoronoiDistributeNodes import distributeNodes2d - #from PeanoHilbertDistributeNodes import distributeNodes2d -else: - from DistributeNodes import distributeNodes2d - distributeNodes2d((nodes1, generator)) output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)") output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)") @@ -339,7 +377,7 @@ cfl = cfl, interfaceMethod = HLLCModulus, sumDensityNodeLists=[nodes1], - densityStabilizationCoefficient = 0.00, + densityStabilizationCoefficient = 0.1, compatibleEnergyEvolution = compatibleEnergy, evolveTotalEnergy = evolveTotalEnergy, linearCorrectGradients = correctVelocityGradient, @@ -358,7 +396,7 @@ evolveTotalEnergy = evolveTotalEnergy, XSPH = XSPH, ASPH = asph, - gradientType = RiemannGradient, + gradientType = SPHSameTimeGradient, densityUpdate=densityUpdate, HUpdate = HUpdate, epsTensile = epsilonTensile, @@ -377,7 +415,28 @@ evolveTotalEnergy = evolveTotalEnergy, XSPH = XSPH, ASPH = asph, - gradientType = RiemannGradient, + gradientType = HydroAccelerationGradient, + densityUpdate=densityUpdate, + HUpdate = HUpdate, + epsTensile = epsilonTensile, + nTensile = nTensile) + +elif mfv: + limiter = VanLeerLimiter() + waveSpeed = DavisWaveSpeed() + solver = HLLC(limiter,waveSpeed,True) + hydro = MFV(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + nodeMotionType=nodeMotion, + specificThermalEnergyDiffusionCoefficient = 0.00, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + XSPH = XSPH, + ASPH = asph, + gradientType = SPHSameTimeGradient, densityUpdate=densityUpdate, HUpdate = HUpdate, epsTensile = epsilonTensile, @@ -410,7 +469,7 @@ #------------------------------------------------------------------------------- # Set the artificial viscosity parameters. 
#------------------------------------------------------------------------------- -if not (gsph or mfm): +if not (gsph or mfm or mfv): q = hydro.Q if Cl: q.Cl = Cl @@ -507,7 +566,7 @@ vizTime = vizTime, vizDerivs = vizDerivs, #skipInitialPeriodicWork = SVPH, - SPH = True, # Only for iterating H + SPH = not asph, # Only for iterating H ) output("control") @@ -716,7 +775,11 @@ comparisonFile = os.path.join(dataDir, comparisonFile) import filecmp assert filecmp.cmp(outputFile, comparisonFile) + + +Masserror = (control.conserve.massHistory[-1] - control.conserve.massHistory[0])/max(1.0e-30, control.conserve.massHistory[0]) Eerror = (control.conserve.EHistory[-1] - control.conserve.EHistory[0])/max(1.0e-30, control.conserve.EHistory[0]) +print("Total mass error: %g" % Masserror) print("Total energy error: %g" % Eerror) if compatibleEnergy and abs(Eerror) > 1e-13: raise ValueError("Energy error outside allowed bounds.") diff --git a/tests/functional/Hydro/Noh/Noh-planar-1d.py b/tests/functional/Hydro/Noh/Noh-planar-1d.py index c261b5b09..0d2f7d1bd 100644 --- a/tests/functional/Hydro/Noh/Noh-planar-1d.py +++ b/tests/functional/Hydro/Noh/Noh-planar-1d.py @@ -50,17 +50,17 @@ # # GSPH # -#ATS:t500 = test( SELF, "--gsph True --gsphReconstructionGradient=RiemannGradient --graphics None --clearDirectories True --checkError True --restartStep 20", label="Planar Noh problem with GSPH and RiemannGradient -- 1-D (serial)") -#ATS:t501 = testif(t500, SELF, "--gsph True --gsphReconstructionGradient=RiemannGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with GSPH and RiemannGradient -- 1-D (serial) RESTART CHECK") -#ATS:t502 = test( SELF, "--gsph True --gsphReconstructionGradient=HydroAccelerationGradient --graphics None --clearDirectories True --checkError True --restartStep 20", label="Planar Noh problem with GSPH and and HydroAccelerationGradient -- 1-D (serial)") -#ATS:t503 = testif(t502, SELF, "--gsph True --gsphReconstructionGradient=HydroAccelerationGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with GSPH and HydroAccelerationGradient -- 1-D (serial) RESTART CHECK") -#ATS:t504 = test( SELF, "--gsph True --gsphReconstructionGradient=SPHGradient --graphics None --clearDirectories True --checkError True --restartStep 20", label="Planar Noh problem with GSPH and SPHGradient -- 1-D (serial)") -#ATS:t505 = testif(t504, SELF, "--gsph True --gsphReconstructionGradient=SPHGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with GSPH and SPHGradient -- 1-D (serial) RESTART CHECK") +#ATS:t500 = test( SELF, "--gsph True --gsphReconstructionGradient RiemannGradient --graphics None --clearDirectories True --checkError True --restartStep 20", label="Planar Noh problem with GSPH and RiemannGradient -- 1-D (serial)") +#ATS:t501 = testif(t500, SELF, "--gsph True --gsphReconstructionGradient RiemannGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with GSPH and RiemannGradient -- 1-D (serial) RESTART CHECK") +#ATS:t502 = test( SELF, "--gsph True --gsphReconstructionGradient HydroAccelerationGradient --graphics None --clearDirectories True --checkError True --restartStep 20", 
label="Planar Noh problem with GSPH and and HydroAccelerationGradient -- 1-D (serial)") +#ATS:t503 = testif(t502, SELF, "--gsph True --gsphReconstructionGradient HydroAccelerationGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with GSPH and HydroAccelerationGradient -- 1-D (serial) RESTART CHECK") +#ATS:t504 = test( SELF, "--gsph True --gsphReconstructionGradient SPHGradient --graphics None --clearDirectories True --checkError True --restartStep 20", label="Planar Noh problem with GSPH and SPHGradient -- 1-D (serial)") +#ATS:t505 = testif(t504, SELF, "--gsph True --gsphReconstructionGradient SPHGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with GSPH and SPHGradient -- 1-D (serial) RESTART CHECK") # # MFM # -#ATS:t600 = test( SELF, "--mfm True --gsphReconstructionGradient=RiemannGradient --graphics None --clearDirectories True --checkError False --restartStep 20", label="Planar Noh problem with MFM -- 1-D (serial)") -#ATS:t601 = testif(t600, SELF, "--mfm True --gsphReconstructionGradient=RiemannGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with MFM -- 1-D (serial) RESTART CHECK") +#ATS:t600 = test( SELF, "--mfm True --gsphReconstructionGradient RiemannGradient --graphics None --clearDirectories True --checkError False --restartStep 20", label="Planar Noh problem with MFM -- 1-D (serial)") +#ATS:t601 = testif(t600, SELF, "--mfm True --gsphReconstructionGradient RiemannGradient --graphics None --clearDirectories False --checkError False --restartStep 20 --restoreCycle 20 --steps 20 --checkRestart True", label="Planar Noh problem with MFM -- 1-D (serial) RESTART CHECK") import os, shutil, sys from SolidSpheral1d import * diff --git a/tests/functional/Hydro/Noh/Noh-spherical-3d.py b/tests/functional/Hydro/Noh/Noh-spherical-3d.py index d6d58a6a8..43226af1a 100644 --- a/tests/functional/Hydro/Noh/Noh-spherical-3d.py +++ b/tests/functional/Hydro/Noh/Noh-spherical-3d.py @@ -55,6 +55,7 @@ fsisph = False, gsph = False, mfm = False, + mfv = False, asph = False, # This just chooses the H algorithm -- you can use this with CRKSPH for instance. 
boolReduceViscosity = False, @@ -144,8 +145,10 @@ hydroname = "FSISPH" elif gsph: hydroname = "GSPH" -elif gsph: +elif mfm: hydroname = "MFM" +elif mfv: + hydroname = "MFV" else: hydroname = "SPH" if asph: @@ -327,6 +330,23 @@ HUpdate = IdealH, epsTensile = epsilonTensile, nTensile = nTensile) +elif mfv: + limiter = VanLeerLimiter() + waveSpeed = DavisWaveSpeed() + solver = HLLC(limiter,waveSpeed,True) + hydro = MFV(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient=correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + XSPH = XSPH, + gradientType = RiemannGradient, + densityUpdate=densityUpdate, + HUpdate = IdealH, + epsTensile = epsilonTensile, + nTensile = nTensile) elif psph: hydro = PSPH(dataBase = db, W = WT, @@ -360,7 +380,7 @@ output("hydro.cfl") output("hydro.compatibleEnergyEvolution") output("hydro.HEvolution") -if not (gsph or fsisph): +if not (gsph or mfm or mfv or fsisph): output("hydro.PiKernel") if not fsisph: output("hydro.densityUpdate") @@ -370,7 +390,7 @@ #------------------------------------------------------------------------------- # Set the artificial viscosity parameters. #------------------------------------------------------------------------------- -if not (gsph or mfm): +if not (gsph or mfm or mfv): q = hydro.Q if Cl: q.Cl = Cl diff --git a/tests/functional/Hydro/RayleighTaylor/RT-2d.py b/tests/functional/Hydro/RayleighTaylor/RT-2d.py index 150846102..c0203ce2e 100644 --- a/tests/functional/Hydro/RayleighTaylor/RT-2d.py +++ b/tests/functional/Hydro/RayleighTaylor/RT-2d.py @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # This is the basic Rayleigh-Taylor Problem #------------------------------------------------------------------------------- -import shutil +import shutil, os, sys, mpi from math import * from Spheral2d import * from SpheralTestUtilities import * @@ -10,13 +10,16 @@ from GenerateNodeDistribution2d import * from CompositeNodeDistribution import * from CentroidalVoronoiRelaxation import * +from HydrostaticReflectingBoundary import HydrostaticReflectingBoundary2d as HydrostaticReflectingBoundary -import mpi -import DistributeNodes +if mpi.procs > 1: + from PeanoHilbertDistributeNodes import distributeNodes2d +else: + from DistributeNodes import distributeNodes2d title("Rayleigh-Taylor test problem in 2D") -class ExponentialDensity: +class ExponentialProfile: def __init__(self, y1, rho0, @@ -26,6 +29,8 @@ def __init__(self, self.alpha = alpha return def __call__(self, r): + #if r.y > 1.0: + # print self.rho0*exp(self.alpha*(r.y - self.y1)) return self.rho0*exp(self.alpha*(r.y - self.y1)) #------------------------------------------------------------------------------- @@ -35,6 +40,9 @@ def __call__(self, r): ny1 = 100, nx2 = 100, ny2 = 100, + refineFactor = 1, + nybound = 8, # number of layers in const node bc + rho0 = 1.0, eps0 = 1.0, x0 = 0.0, @@ -47,53 +55,94 @@ def __call__(self, r): vx1 = 0.0, vx2 = 0.0, freq = 1.0, - alpha = 0.01, # amplitude of displacement + alpha = 0.0025, # amplitude of displacement beta = 5.0, # speed at which displacement decays away from midline - S = 3.0, # density jump at surface - g0 = -2.0, - w0 = 0.1, + S = 10.0, # density jump at surface + g0 = -0.5, + w0 = 0.005, sigma = 0.05/sqrt(2.0), gamma = 5.0/3.0, mu = 1.0, - nPerh = 1.51, + useHydrostaticBoundary = True, + + # kernel options + HUpdate = IdealH, + KernelConstructor = WendlandC2Kernel, + order = 3, + nPerh = 3.0, + 
hmin = 0.0001, + hmax = 0.5, + hminratio = 0.1, + + # hydro type + svph = False, + crksph = False, + psph = False, + fsisph = False, + gsph = False, + mfm = False, + + # hydro options + asph = False, + xsph = False, + solid = False, + filter = 0.0, + densityUpdate = IntegrateDensity, + compatibleEnergy = True, + evolveTotalEnergy = False, + useVelocityMagnitudeForDt = False, + correctVelocityGradient = False, + epsilonTensile = 0.0, + nTensile = 8, + + # SPH/PSPH options + gradhCorrection = False, - SVPH = False, - CRKSPH = False, - ASPH = False, - SPH = True, # This just chooses the H algorithm -- you can use this with CRKSPH for instance. - filter = 0.0, # CRKSPH filtering - Qconstructor = MonaghanGingoldViscosity, - #Qconstructor = TensorMonaghanGingoldViscosity, - linearConsistent = False, + # svph options fcentroidal = 0.0, fcellPressure = 0.0, - boolReduceViscosity = False, - nh = 5.0, - aMin = 0.1, - aMax = 2.0, - Qhmult = 1.0, + linearConsistent = False, + + # FSISPH parameters + fsiSurfaceCoefficient = 0.00, # adds additional repulsive force to material interfaces) + fsiRhoStabilizeCoeff = 0.1, # coefficient that smooths the density field + fsiEpsDiffuseCoeff = 0.1, # explicit diiffusion of the thermal energy + fsiXSPHCoeff = 0.00, # fsi uses multiplier for XSPH instead of binary switch + fsiInterfaceMethod = ModulusInterface, # (HLLCInterface, ModulusInterface) + fsiKernelMethod = NeverAverageKernels, # (NeverAverageKernels, AlwaysAverageKernels, AverageInterfaceKernels) + + # GSPH/MFM parameters + gsphEpsDiffuseCoeff = 0.0, + gsphLinearCorrect = True, + LimiterConstructor = VanLeerLimiter, + WaveSpeedConstructor = DavisWaveSpeed, + + # artificial viscosity options + Qconstructor = LimitedMonaghanGingoldViscosity, Cl = 1.0, Cq = 1.0, linearInExpansion = False, Qlimiter = False, balsaraCorrection = False, epsilon2 = 1e-2, - hmin = 0.0001, - hmax = 0.5, - hminratio = 0.1, - cfl = 0.5, - useVelocityMagnitudeForDt = False, - XSPH = False, - epsilonTensile = 0.0, - nTensile = 8, + + boolReduceViscosity = False, + nh = 5.0, + aMin = 0.1, + aMax = 2.0, + Qhmult = 1.0, + # artificial conduction options + bArtificialConduction = False, + arCondAlpha = 0.5, + + # integrator & options IntegratorConstructor = CheapSynchronousRK2Integrator, + cfl = 0.25, goalTime = 2.0, steps = None, - vizCycle = None, - vizTime = 0.01, dt = 0.0001, dtMin = 1.0e-8, dtMax = 0.1, @@ -101,54 +150,62 @@ def __call__(self, r): maxSteps = None, statsStep = 10, smoothIters = 0, - HUpdate = IdealH, domainIndependent = False, rigorousBoundaries = False, dtverbose = False, - densityUpdate = RigorousSumDensity, # VolumeScaledDensity, - compatibleEnergy = True, # <--- Important! rigorousBoundaries does not work with the compatibleEnergy algorithm currently. - gradhCorrection = False, - + # output options + vizCycle = None, + vizTime = 0.1, useVoronoiOutput = False, clearDirectories = False, restoreCycle = None, restartStep = 100, - redistributeStep = 500, + redistributeStep = 50000, checkRestart = False, dataDir = "dumps-Rayleigh-Taylor-2d", outputFile = "None", comparisonFile = "None", serialDump = False, #whether to dump a serial ascii file at the end for viz - - bArtificialConduction = False, - arCondAlpha = 0.5, ) + +assert not (compatibleEnergy and evolveTotalEnergy) +assert sum([fsisph,psph,gsph,crksph,svph,mfm])<=1 +assert not (fsisph and not solid) +assert not ((mfm or gsph) and (boolReduceViscosity)) + # Decide on our hydro algorithm. 
-if SVPH: - if ASPH: - HydroConstructor = ASVPHFacetedHydro - else: - HydroConstructor = SVPHFacetedHydro -elif CRKSPH: - if ASPH: - HydroConstructor = ACRKSPHHydro - else: - HydroConstructor = CRKSPHHydro -else: - if ASPH: - HydroConstructor = ASPHHydro - else: - HydroConstructor = SPHHydro +hydroname = 'SPH' +useArtificialViscosity=True + +if svph: + hydroname = "SVPH" +elif crksph: + Qconstructor = LimitedMonaghanGingoldViscosity + hydroname = "CRK"+hydroname +elif psph: + hydroname = "P"+hydroname +elif fsisph: + hydroname = "FSI"+hydroname +elif gsph: + hydroname = "G"+hydroname + useArtificialViscosity=False +elif mfm: + hydroname = "MFM" + useArtificialViscosity=False +if asph: + hydorname = "A"+hydroname +if solid: + hydroname = "solid"+hydroname dataDir = os.path.join(dataDir, "S=%g" % (S), "vx1=%g-vx2=%g" % (abs(vx1), abs(vx2)), - str(HydroConstructor).split("'")[1].split(".")[-1], + hydroname, "densityUpdate=%s" % (densityUpdate), - "XSPH=%s" % XSPH, + "XSPH=%s" % xsph, "filter=%s" % filter, "%s-Cl=%g-Cq=%g" % (str(Qconstructor).split("'")[1].split(".")[-1], Cl, Cq), "%ix%i" % (nx1, ny1 + ny2), @@ -158,16 +215,10 @@ def __call__(self, r): restartBaseName = os.path.join(restartDir, "Rayleigh-Taylor-2d") vizBaseName = "Rayleigh-Taylor-2d" -#------------------------------------------------------------------------------- -# CRKSPH Switches to ensure consistency -#------------------------------------------------------------------------------- -if CRKSPH: - Qconstructor = LimitedMonaghanGingoldViscosity #------------------------------------------------------------------------------- # Check if the necessary output directories exist. If not, create them. #------------------------------------------------------------------------------- -import os, sys if mpi.rank == 0: if clearDirectories and os.path.exists(dataDir): shutil.rmtree(dataDir) @@ -191,19 +242,27 @@ def __call__(self, r): #------------------------------------------------------------------------------- # Interpolation kernels. #------------------------------------------------------------------------------- -WT = TableKernel(BSplineKernel(), 1000) +if KernelConstructor==NBSplineKernel: + WT = TableKernel(NBSplineKernel(order), 1000) +else: + WT = TableKernel(KernelConstructor(), 1000) output("WT") kernelExtent = WT.kernelExtent #------------------------------------------------------------------------------- # Make the NodeList. #------------------------------------------------------------------------------- -nodes1 = makeFluidNodeList("High density gas", eos, +if solid: + nodeListConstructor=makeSolidNodeList +else: + nodeListConstructor=makeFluidNodeList + +nodes1 = nodeListConstructor("Low density gas", eos, hmin = hmin, hmax = hmax, hminratio = hminratio, nPerh = nPerh) -nodes2 = makeFluidNodeList("Low density gas", eos, +nodes2 = nodeListConstructor("High density gas", eos, hmin = hmin, hmax = hmax, hminratio = hminratio, @@ -216,35 +275,47 @@ def __call__(self, r): output("nodes.hminratio") output("nodes.nodesPerSmoothingScale") + +#------------------------------------------------------------------------------- +# functions for ICs. +#------------------------------------------------------------------------------- +eps1=eps0 +eps2=eps1/S + +lowerDensity = ExponentialProfile(y1, + rho0/S, + g0/((gamma - 1.0)*eps1)) +upperDensity = ExponentialProfile(y1, + rho0, + g0/((gamma - 1.0)*eps2)) + + #------------------------------------------------------------------------------- # Set the node properties. 
#------------------------------------------------------------------------------- + if restoreCycle is None: - generator1 = GenerateNodeDistribution2d(nx1, ny1, - rho = ExponentialDensity(y1, - rho0/S, - g0/((gamma - 1.0)*eps0)), - distributionType = "lattice", - xmin = (x0,y0), + nx1 *= refineFactor + ny1 *= refineFactor + nx2 *= refineFactor + ny2 *= refineFactor + dy = (y1 - y0)/ny1 + + generator1 = GenerateNodeDistribution2d(nx1, ny1+nybound, + rho = lowerDensity, + distributionType = "xstaggeredLattice", + xmin = (x0,y0-nybound*dy), xmax = (x1,y1), - nNodePerh = nPerh, SPH = SPH) - generator2 = GenerateNodeDistribution2d(nx2, ny2, - rho = ExponentialDensity(y1, - rho0, - g0*S/((gamma - 1.0)*eps0)), - distributionType = "lattice", + generator2 = GenerateNodeDistribution2d(nx2, ny2+nybound, + rho = upperDensity, + distributionType = "xstaggeredLattice", xmin = (x0,y1), - xmax = (x1,y2), + xmax = (x1,y2+nybound*dy), nNodePerh = nPerh, SPH = SPH) - if mpi.procs > 1: - from VoronoiDistributeNodes import distributeNodes2d - else: - from DistributeNodes import distributeNodes2d - distributeNodes2d((nodes1, generator1), (nodes2, generator2)) @@ -275,64 +346,114 @@ def dy(ri): #------------------------------------------------------------------------------- # Construct the artificial viscosity. #------------------------------------------------------------------------------- -q = Qconstructor(Cl, Cq, linearInExpansion) -q.epsilon2 = epsilon2 -q.limiter = Qlimiter -q.balsaraShearCorrection = balsaraCorrection -output("q") -output("q.Cl") -output("q.Cq") -output("q.epsilon2") -output("q.limiter") -output("q.balsaraShearCorrection") -output("q.linearInExpansion") -output("q.quadraticInExpansion") +if useArtificialViscosity: + q = Qconstructor(Cl, Cq, linearInExpansion) + q.epsilon2 = epsilon2 + q.limiter = Qlimiter + q.balsaraShearCorrection = balsaraCorrection + output("q") + output("q.Cl") + output("q.Cq") + output("q.epsilon2") + output("q.limiter") + output("q.balsaraShearCorrection") + output("q.linearInExpansion") + output("q.quadraticInExpansion") #------------------------------------------------------------------------------- # Construct the hydro physics object. 
#------------------------------------------------------------------------------- -if SVPH: - hydro = HydroConstructor(W = WT, - Q = q, - cfl = cfl, - useVelocityMagnitudeForDt = useVelocityMagnitudeForDt, - compatibleEnergyEvolution = compatibleEnergy, - densityUpdate = densityUpdate, - XSVPH = XSPH, - linearConsistent = linearConsistent, - generateVoid = False, - HUpdate = HUpdate, - fcentroidal = fcentroidal, - fcellPressure = fcellPressure, - xmin = Vector(-2.0, -2.0), - xmax = Vector(3.0, 3.0)) -# xmin = Vector(x0 - 0.5*(x2 - x0), y0 - 0.5*(y2 - y0)), -# xmax = Vector(x2 + 0.5*(x2 - x0), y2 + 0.5*(y2 - y0))) -elif CRKSPH: - hydro = HydroConstructor(W = WT, - Q = q, - filter = filter, - cfl = cfl, - useVelocityMagnitudeForDt = useVelocityMagnitudeForDt, - compatibleEnergyEvolution = compatibleEnergy, - XSPH = XSPH, - densityUpdate = densityUpdate, - HUpdate = HUpdate) +if crksph: + hydro = CRKSPH(dataBase = db, + cfl = cfl, + filter = filter, + epsTensile = epsilonTensile, + nTensile = nTensile, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + XSPH = xsph, + densityUpdate = densityUpdate, + HUpdate = HUpdate) +elif psph: + hydro = PSPH(dataBase = db, + cfl = cfl, + W = WT, + Q = q, + filter = filter, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate = densityUpdate, + HUpdate = HUpdate, + XSPH = xsph, + ASPH = asph) +if fsisph: + sumDensityNodeListSwitch =[nodes1,nodes2] + hydro = FSISPH(dataBase = db, + Q=q, + W = WT, + cfl = cfl, + surfaceForceCoefficient = fsiSurfaceCoefficient, + densityStabilizationCoefficient = fsiRhoStabilizeCoeff, + specificThermalEnergyDiffusionCoefficient = fsiEpsDiffuseCoeff, + xsphCoefficient = fsiXSPHCoeff, + interfaceMethod = fsiInterfaceMethod, + kernelAveragingMethod = fsiKernelMethod, + sumDensityNodeLists = sumDensityNodeListSwitch, + correctVelocityGradient = correctVelocityGradient, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + ASPH = asph, + epsTensile = epsilonTensile) +elif gsph: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = GSPH(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate=densityUpdate, + XSPH = xsph, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) +elif mfm: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = MFM(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate=densityUpdate, + XSPH = xsph, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) else: - hydro = HydroConstructor(W = WT, - Q = q, - cfl = cfl, - useVelocityMagnitudeForDt = useVelocityMagnitudeForDt, - compatibleEnergyEvolution = compatibleEnergy, - gradhCorrection = gradhCorrection, - XSPH = XSPH, - densityUpdate = densityUpdate, - HUpdate = HUpdate, - epsTensile = epsilonTensile, - nTensile = nTensile) + hydro = SPH(dataBase = db, + cfl = cfl, + W = WT, + Q = q, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + gradhCorrection = gradhCorrection, + 
correctVelocityGradient = correctVelocityGradient, + XSPH = xsph, + ASPH = asph, + densityUpdate = densityUpdate, + HUpdate = HUpdate, + epsTensile = epsilonTensile, + nTensile = nTensile) + output("hydro") output("hydro.kernel()") -output("hydro.PiKernel()") output("hydro.cfl") output("hydro.compatibleEnergyEvolution") output("hydro.densityUpdate") @@ -344,9 +465,8 @@ def dy(ri): # Construct the MMRV physics object. #------------------------------------------------------------------------------- -if boolReduceViscosity: +if boolReduceViscosity and useArtificialViscosity: evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax) - packages.append(evolveReducingViscosityMultiplier) #------------------------------------------------------------------------------- @@ -354,9 +474,7 @@ def dy(ri): #------------------------------------------------------------------------------- if bArtificialConduction: - #q.reducingViscosityCorrection = True - ArtyCond = ArtificialConduction(WT,arCondAlpha) - + ArtyCond = ArtificialConduction(WT,arCondAlpha) packages.append(ArtyCond) #------------------------------------------------------------------------------- @@ -370,9 +488,6 @@ def dy(ri): for i in range(nodes2.numInternalNodes): nodeIndicies2.append(i) -#nodeIndicies1.extend(range(nodes1.numInternalNodes)) -#nodeIndicies2.extend(range(nodes2.numInternalNodes)) - gravity1 = ConstantAcceleration2d(Vector2d(0.0, g0), nodes1, nodeIndicies1) @@ -391,11 +506,23 @@ def dy(ri): yp1 = Plane(Vector(x0, y0), Vector(0.0, 1.0)) yp2 = Plane(Vector(x0, y2), Vector(0.0, -1.0)) xbc = PeriodicBoundary(xp1, xp2) -#ybc = PeriodicBoundary(yp1, yp2) -ybc1 = ReflectingBoundary(yp1) -ybc2 = ReflectingBoundary(yp2) -bcSet = [xbc, ybc1, ybc2] -#bcSet = [xbc,ybc1] + +pos = nodes1.positions() +ylow, yhigh = vector_of_int(), vector_of_int() +for i in xrange(nodes1.numInternalNodes): + if pos[i].y < y0: + ylow.append(i) + +pos = nodes2.positions() +for i in xrange(nodes2.numInternalNodes): + if pos[i].y > y2: + yhigh.append(i) + +print(yhigh) +ybc1 = ConstantBoundary(db, nodes1, ylow, yp1) +ybc2 = ConstantBoundary(db, nodes2, yhigh, yp2) + +bcSet = [ybc1, ybc2, xbc] for bc in bcSet: for p in packages: @@ -443,6 +570,7 @@ def dy(ri): redistributeStep = redistributeStep, vizMethod = vizMethod, vizBaseName = vizBaseName, + vizGhosts=True, vizDir = vizDir, vizStep = vizCycle, vizTime = vizTime, diff --git a/tests/functional/Hydro/RayleighTaylor/RT-2d_Hopkins.py b/tests/functional/Hydro/RayleighTaylor/RT-2d_Hopkins.py index 5946445ec..3175acd48 100644 --- a/tests/functional/Hydro/RayleighTaylor/RT-2d_Hopkins.py +++ b/tests/functional/Hydro/RayleighTaylor/RT-2d_Hopkins.py @@ -5,7 +5,7 @@ #------------------------------------------------------------------------------- # This is the basic Rayleigh-Taylor Problem #------------------------------------------------------------------------------- -import shutil +import shutil, os, sys from math import * from Spheral2d import * from SpheralTestUtilities import * @@ -18,7 +18,10 @@ from RTMixLength import RTMixLength import mpi -import DistributeNodes +if mpi.procs > 1: + from PeanoHilbertDistributeNodes import distributeNodes2d +else: + from DistributeNodes import distributeNodes2d class ExponentialDensity: def __init__(self, @@ -46,25 +49,72 @@ def __call__(self, r): y0 = 0.0, y1 = 1.0, gval = -0.5, - w0 = 0.025, + w0 = 0.025, delta = 0.025, gamma = 1.4, mu = 1.0, - nPerh = 1.01, - - SVPH = False, - CRKSPH = False, - PSPH = False, - SPH = True, # This just chooses the H 
algorithm -- you can use this with CRKSPH for instance. - filter = 0.0, # CRKSPH filtering - Qconstructor = MonaghanGingoldViscosity, - #Qconstructor = TensorMonaghanGingoldViscosity, - KernelConstructor = BSplineKernel, + # kernel options + KernelConstructor = WendlandC2Kernel, + HUpdate = IdealH, + nPerh = 3.01, order = 5, + hmin = 0.0001, + hmax = 0.5, + hminratio = 0.1, + + # hydros + svph = False, + crksph = False, + psph = False, + fsisph = False, + gsph = False, + mfm = False, + + # general hydro options + asph = False, + xsph = False, + solid = False, + filter = 0.0, + epsilonTensile = 0.0, + nTensile = 8, + useVelocityMagnitudeForDt = False, + densityUpdate = IntegrateDensity, + compatibleEnergy = True, + correctVelocityGradient = True, + evolveTotalEnergy= False, + + # SPH/PSPH options + gradhCorrection = True, + HopkinsConductivity=False, + + # svph options linearConsistent = False, fcentroidal = 0.0, fcellPressure = 0.0, + + # crksph options + correctionOrder = LinearOrder, + + # FSISPH parameters + fsiSurfaceCoefficient = 0.00, # adds additional repulsive force to material interfaces) + fsiRhoStabilizeCoeff = 0.1, # coefficient that smooths the density field + fsiEpsDiffuseCoeff = 0.1, # explicit diiffusion of the thermal energy + fsiXSPHCoeff = 0.00, # fsi uses multiplier for XSPH instead of binary switch + fsiInterfaceMethod = ModulusInterface, # (HLLCInterface, ModulusInterface) + fsiKernelMethod = NeverAverageKernels, # (NeverAverageKernels, AlwaysAverageKernels, AverageInterfaceKernels) + + # GSPH/MFM parameters + gsphEpsDiffuseCoeff = 0.0, + gsphLinearCorrect = True, + LimiterConstructor = VanLeerLimiter, + WaveSpeedConstructor = DavisWaveSpeed, + riemannGradientType = HydroAccelerationGradient, + + # artificial viscosity + Qconstructor = LimitedMonaghanGingoldViscosity, + Cl = 1.0, + Cq = 1.0, boolReduceViscosity = False, nh = 5.0, aMin = 0.1, @@ -78,22 +128,13 @@ def __call__(self, r): betaE = 1.0, fKern = 1.0/3.0, boolHopkinsCorrection = True, - Cl = 1.0, - Cq = 1.0, linearInExpansion = False, Qlimiter = False, balsaraCorrection = False, epsilon2 = 1e-2, - hmin = 0.0001, - hmax = 0.5, - hminratio = 0.1, - cfl = 0.5, - useVelocityMagnitudeForDt = False, - XSPH = False, - epsilonTensile = 0.0, - nTensile = 8, IntegratorConstructor = CheapSynchronousRK2Integrator, + cfl = 0.5, goalTime = 4.0, steps = None, vizCycle = None, @@ -105,18 +146,9 @@ def __call__(self, r): maxSteps = None, statsStep = 10, smoothIters = 0, - HUpdate = IdealH, domainIndependent = False, rigorousBoundaries = False, dtverbose = False, - - correctionOrder = LinearOrder, - densityUpdate = RigorousSumDensity, # VolumeScaledDensity, - compatibleEnergy = True, # <--- Important! rigorousBoundaries does not work with the compatibleEnergy algorithm currently. - gradhCorrection = True, - correctVelocityGradient = True, - evolveTotalEnergy= False, - HopkinsConductivity=False, clearDirectories = False, restoreCycle = -1, @@ -134,39 +166,44 @@ def __call__(self, r): arCondAlpha = 0.5, ) +assert not (compatibleEnergy and evolveTotalEnergy) +assert sum([fsisph,psph,gsph,crksph,svph,mfm])<=1 +assert not (fsisph and not solid) +assert not ((mfm or gsph) and (boolReduceViscosity or boolReduceViscosity)) assert not(boolReduceViscosity and boolCullenViscosity) -# Decide on our hydro algorithm. 
-if SVPH:
-    if SPH:
-        HydroConstructor = SVPHFacetedHydro
-    else:
-        HydroConstructor = ASVPHFacetedHydro
-elif CRKSPH:
+
+hydroname = 'SPH'
+useArtificialViscosity=True
+
+if svph:
+    hydroname = "SVPH"
+elif crksph:
     Qconstructor = LimitedMonaghanGingoldViscosity
-    if SPH:
-        HydroConstructor = CRKSPHHydro
-    else:
-        HydroConstructor = ACRKSPHHydro
-elif PSPH:
-    if SPH:
-        HydroConstructor = PSPHHydro
-    else:
-        HydroConstructor = APSPHHydro
-else:
-    if SPH:
-        HydroConstructor = SPHHydro
-    else:
-        HydroConstructor = ASPHHydro
+    hydroname = "CRK"+hydroname
+elif psph:
+    hydroname = "P"+hydroname
+elif fsisph:
+    hydroname = "FSI"+hydroname
+elif gsph:
+    hydroname = "G"+hydroname
+    useArtificialViscosity=False
+elif mfm:
+    hydroname = "MFM"
+    useArtificialViscosity=False
+if asph:
+    hydroname = "A"+hydroname
+if solid:
+    hydroname = "solid"+hydroname
 
 dataDir = os.path.join(dataDir,
                        "gval=%g" % (gval),
                        "w0=%g" % w0,
-                       HydroConstructor.__name__,
+                       hydroname,
                        Qconstructor.__name__,
                        KernelConstructor.__name__,
                        "densityUpdate=%s" % (densityUpdate),
                        "correctionOrder=%s" % (correctionOrder),
-                       "XSPH=%s" % XSPH,
+                       "XSPH=%s" % xsph,
                        "filter=%s" % filter,
                        "compatible=%s" % compatibleEnergy,
                        "Cullen=%s" % boolCullenViscosity,
@@ -181,7 +218,6 @@ def __call__(self, r):
 #-------------------------------------------------------------------------------
 # Check if the necessary output directories exist. If not, create them.
 #-------------------------------------------------------------------------------
-import os, sys
 if mpi.rank == 0:
     if clearDirectories and os.path.exists(dataDir):
         shutil.rmtree(dataDir)
@@ -201,18 +237,20 @@ def __call__(self, r):
 #-------------------------------------------------------------------------------
 if KernelConstructor==NBSplineKernel:
     WT = TableKernel(NBSplineKernel(order), 10000)
-    WTPi = TableKernel(NBSplineKernel(order), 10000, Qhmult)
 else:
     WT = TableKernel(KernelConstructor(), 10000)
-    WTPi = TableKernel(KernelConstructor(), 10000, Qhmult)
 output("WT")
-output("WTPi")
 kernelExtent = WT.kernelExtent
 
 #-------------------------------------------------------------------------------
 # Make the NodeList.
 #-------------------------------------------------------------------------------
-nodes = makeFluidNodeList("High density gas", eos,
+if solid:
+    nodeListConstructor=makeSolidNodeList
+else:
+    nodeListConstructor=makeFluidNodeList
+
+nodes = nodeListConstructor("High density gas", eos,
                           hmin = hmin,
                           hmax = hmax,
                           hminratio = hminratio,
@@ -240,11 +278,6 @@ def __call__(self, r):
                          nNodePerh = nPerh,
                          SPH = SPH)
 
-if mpi.procs > 1:
-    from VoronoiDistributeNodes import distributeNodes2d
-else:
-    from DistributeNodes import distributeNodes2d
-
 distributeNodes2d((nodes, generator))
 
 #Set IC
@@ -276,78 +309,117 @@ def __call__(self, r):
 #-------------------------------------------------------------------------------
 # Construct the artificial viscosity.
#------------------------------------------------------------------------------- -q = Qconstructor(Cl, Cq, linearInExpansion) -q.epsilon2 = epsilon2 -q.limiter = Qlimiter -q.balsaraShearCorrection = balsaraCorrection -output("q") -output("q.Cl") -output("q.Cq") -output("q.epsilon2") -output("q.limiter") -output("q.balsaraShearCorrection") -output("q.linearInExpansion") -output("q.quadraticInExpansion") +if useArtificialViscosity: + q = Qconstructor(Cl, Cq, linearInExpansion) + q.epsilon2 = epsilon2 + q.limiter = Qlimiter + q.balsaraShearCorrection = balsaraCorrection + output("q") + output("q.Cl") + output("q.Cq") + output("q.epsilon2") + output("q.limiter") + output("q.balsaraShearCorrection") + output("q.linearInExpansion") + output("q.quadraticInExpansion") + #------------------------------------------------------------------------------- # Construct the hydro physics object. #------------------------------------------------------------------------------- -if SVPH: - hydro = HydroConstructor(W = WT, - Q = q, - cfl = cfl, - useVelocityMagnitudeForDt = useVelocityMagnitudeForDt, - compatibleEnergyEvolution = compatibleEnergy, - densityUpdate = densityUpdate, - XSVPH = XSPH, - linearConsistent = linearConsistent, - generateVoid = False, - HUpdate = HUpdate, - fcentroidal = fcentroidal, - fcellPressure = fcellPressure, - xmin = Vector(-2.0, -2.0), - xmax = Vector(3.0, 3.0)) -elif CRKSPH: - hydro = HydroConstructor(W = WT, - WPi = WTPi, - Q = q, - filter = filter, - cfl = cfl, - useVelocityMagnitudeForDt = useVelocityMagnitudeForDt, - compatibleEnergyEvolution = compatibleEnergy, - XSPH = XSPH, - correctionOrder = correctionOrder, - densityUpdate = densityUpdate, - HUpdate = HUpdate) -elif PSPH: - hydro = HydroConstructor(W = WT, - Q = q, - filter = filter, - cfl = cfl, - compatibleEnergyEvolution = compatibleEnergy, - evolveTotalEnergy = evolveTotalEnergy, - HopkinsConductivity = HopkinsConductivity, - densityUpdate = densityUpdate, - correctVelocityGradient = correctVelocityGradient, - HUpdate = HUpdate, - XSPH = XSPH) +if crksph: + hydro = CRKSPH(dataBase = db, + cfl = cfl, + filter = filter, + epsTensile = epsilonTensile, + nTensile = nTensile, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + XSPH = xsph, + densityUpdate = densityUpdate, + HUpdate = HUpdate) +elif psph: + hydro = PSPH(dataBase = db, + cfl = cfl, + W = WT, + Q = q, + filter = filter, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate = densityUpdate, + HUpdate = HUpdate, + XSPH = xsph, + ASPH = asph) +if fsisph: + sumDensityNodeListSwitch =[nodes] + hydro = FSISPH(dataBase = db, + Q=q, + W = WT, + cfl = cfl, + surfaceForceCoefficient = fsiSurfaceCoefficient, + densityStabilizationCoefficient = fsiRhoStabilizeCoeff, + specificThermalEnergyDiffusionCoefficient = fsiEpsDiffuseCoeff, + xsphCoefficient = fsiXSPHCoeff, + interfaceMethod = fsiInterfaceMethod, + kernelAveragingMethod = fsiKernelMethod, + sumDensityNodeLists = sumDensityNodeListSwitch, + correctVelocityGradient = correctVelocityGradient, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + ASPH = asph, + epsTensile = epsilonTensile) +elif gsph: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = GSPH(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + gradientType = riemannGradientType, + compatibleEnergyEvolution = compatibleEnergy, + 
correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate=densityUpdate, + XSPH = xsph, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) +elif mfm: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,gsphLinearCorrect) + hydro = MFM(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + gradientType = riemannGradientType, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + densityUpdate=densityUpdate, + XSPH = xsph, + ASPH = asph, + epsTensile = epsilonTensile, + nTensile = nTensile) else: - hydro = HydroConstructor(W = WT, - WPi = WTPi, - Q = q, - cfl = cfl, - useVelocityMagnitudeForDt = useVelocityMagnitudeForDt, - compatibleEnergyEvolution = compatibleEnergy, - gradhCorrection = gradhCorrection, - correctVelocityGradient = correctVelocityGradient, - evolveTotalEnergy = evolveTotalEnergy, - XSPH = XSPH, - densityUpdate = densityUpdate, - HUpdate = HUpdate, - epsTensile = epsilonTensile) + hydro = SPH(dataBase = db, + cfl = cfl, + W = WT, + Q = q, + compatibleEnergyEvolution = compatibleEnergy, + evolveTotalEnergy = evolveTotalEnergy, + gradhCorrection = gradhCorrection, + correctVelocityGradient = correctVelocityGradient, + XSPH = xsph, + ASPH = asph, + densityUpdate = densityUpdate, + HUpdate = HUpdate, + epsTensile = epsilonTensile, + nTensile = nTensile) + output("hydro") output("hydro.kernel()") -output("hydro.PiKernel()") output("hydro.cfl") output("hydro.compatibleEnergyEvolution") output("hydro.densityUpdate") @@ -358,10 +430,10 @@ def __call__(self, r): #------------------------------------------------------------------------------- # Construct the MMRV physics object. #------------------------------------------------------------------------------- -if boolReduceViscosity: +if boolReduceViscosity and useArtificialViscosity: evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax) packages.append(evolveReducingViscosityMultiplier) -elif boolCullenViscosity: +elif boolCullenViscosity and useArtificialViscosity: evolveCullenViscosityMultiplier = CullenDehnenViscosity(q,WTPi,alphMax,alphMin,betaC,betaD,betaE,fKern,boolHopkinsCorrection) packages.append(evolveCullenViscosityMultiplier) @@ -407,8 +479,8 @@ def __call__(self, r): ylow.append(i) elif pos[i].y > y1: yhigh.append(i) -ybc1 = ConstantBoundary(nodes, ylow, yp1) -ybc2 = ConstantBoundary(nodes, yhigh, yp2) +ybc1 = ConstantBoundary(db,nodes, ylow, yp1) +ybc2 = ConstantBoundary(db,nodes, yhigh, yp2) bcSet = [ybc1, ybc2, xbc] # <-- ybc should be first! @@ -465,7 +537,7 @@ def __call__(self, r): restartBaseName = restartBaseName, restoreCycle = restoreCycle, redistributeStep = None, - vizMethod = vizMethod, + #vizMethod = vizMethod, vizBaseName = vizBaseName, vizDir = vizDir, vizStep = vizCycle, diff --git a/tests/functional/Hydro/Sedov/Sedov-cylindrical-2d.py b/tests/functional/Hydro/Sedov/Sedov-cylindrical-2d.py index e63fb8d82..29a6472f4 100644 --- a/tests/functional/Hydro/Sedov/Sedov-cylindrical-2d.py +++ b/tests/functional/Hydro/Sedov/Sedov-cylindrical-2d.py @@ -1,14 +1,19 @@ #------------------------------------------------------------------------------- # The cylindrical Sedov test case (2-D). 
#------------------------------------------------------------------------------- -import os, sys, shutil +import os, sys, shutil, mpi from Spheral2d import * from SpheralTestUtilities import * #from SpheralGnuPlotUtilities import * from GenerateNodeDistribution2d import * from CubicNodeGenerator import GenerateSquareNodeDistribution -import mpi +if mpi.procs > 1: + from VoronoiDistributeNodes import distributeNodes2d + #from PeanoHilbertDistributeNodes import distributeNodes2d +else: + from DistributeNodes import distributeNodes2d + title("2-D integrated hydro test -- planar Sedov problem") #------------------------------------------------------------------------------- @@ -22,8 +27,8 @@ nTheta = 50, rmin = 0.0, rmax = 1.0, - nPerh = 1.51, - order = 5, + nPerh = 1.00, + order = 3, rho0 = 1.0, eps0 = 0.0, @@ -46,6 +51,8 @@ psph = False, fsisph = False, gsph = False, + mfm = False, + mfv = False, # hydro options solid = False, @@ -63,7 +70,7 @@ volumeType = RKSumVolume, # gsph options - RiemannGradientType = RiemannGradient, # (RiemannGradient,SPHGradient,HydroAccelerationGradient,OnlyDvDxGradient,MixedMethodGradient) + RiemannGradientType = SPHGradient, # (RiemannGradient,SPHGradient,HydroAccelerationGradient,OnlyDvDxGradient,MixedMethodGradient) linearReconstruction = True, # Artifical Viscosity @@ -95,9 +102,11 @@ statsStep = 1, smoothIters = 0, useVelocityMagnitudeForDt = False, + dtverbose = False, # IO vizCycle = None, + vizDerivs = False, vizTime = 0.1, restoreCycle = -1, restartStep = 1000, @@ -117,7 +126,7 @@ assert not(boolReduceViscosity and boolCullenViscosity) assert thetaFactor in (0.5, 1.0, 2.0) -assert not(gsph and (boolReduceViscosity or boolCullenViscosity)) +assert not((gsph or mfm or mfv) and (boolReduceViscosity or boolCullenViscosity)) assert not(fsisph and not solid) theta = thetaFactor * pi @@ -154,6 +163,10 @@ hydroname = "FSISPH" elif gsph: hydroname = "GSPH" +elif mfm: + hydroname = "MFM" +elif mfv: + hydroname = "MFV" else: hydroname = "SPH" if asph: @@ -175,7 +188,6 @@ #------------------------------------------------------------------------------- # Check if the necessary output directories exist. If not, create them. 
#------------------------------------------------------------------------------- -import os, sys if mpi.rank == 0: if clearDirectories and os.path.exists(dataDir): shutil.rmtree(dataDir) @@ -249,12 +261,6 @@ nNodePerh = nPerh, SPH = (not ASPH)) -if mpi.procs > 1: - from VoronoiDistributeNodes import distributeNodes2d - #from PeanoHilbertDistributeNodes import distributeNodes2d -else: - from DistributeNodes import distributeNodes2d - distributeNodes2d((nodes1, generator)) output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)") output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)") @@ -348,6 +354,42 @@ compatibleEnergyEvolution = compatibleEnergy, correctVelocityGradient= correctVelocityGradient, evolveTotalEnergy = evolveTotalEnergy, + gradientType = RiemannGradientType, + XSPH = XSPH, + ASPH = asph, + densityUpdate=densityUpdate, + HUpdate = HUpdate) +elif mfm: + limiter = VanLeerLimiter() + waveSpeed = DavisWaveSpeed() + solver = HLLC(limiter,waveSpeed,linearReconstruction) + hydro = MFM(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + specificThermalEnergyDiffusionCoefficient = 0.00, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + gradientType = RiemannGradientType, + XSPH = XSPH, + ASPH = asph, + densityUpdate=densityUpdate, + HUpdate = HUpdate) +elif mfv: + limiter = VanLeerLimiter() + waveSpeed = DavisWaveSpeed() + solver = HLLC(limiter,waveSpeed,linearReconstruction) + hydro = MFV(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + specificThermalEnergyDiffusionCoefficient = 0.00, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient= correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + gradientType = RiemannGradientType, + nodeMotionType=NodeMotionType.Fician, XSPH = XSPH, ASPH = asph, densityUpdate=densityUpdate, @@ -386,8 +428,10 @@ output("hydro.densityUpdate") output("hydro.HEvolution") -if not gsph: +if not (gsph or mfm or mfv): q = hydro.Q + q.Cq = 2 + q.Cl = 2 output("q") output("q.Cl") output("q.Cq") @@ -433,6 +477,7 @@ if dtMax: integrator.dtMax = dtMax integrator.dtGrowth = dtGrowth +integrator.verbose = dtverbose integrator.allowDtCheck = True output("integrator") output("integrator.havePhysicsPackage(hydro)") @@ -457,6 +502,7 @@ restoreCycle = restoreCycle, vizMethod = vizMethod, vizBaseName = "Sedov-cylindrical-2d-%ix%i" % (nRadial, nTheta), + vizDerivs=vizDerivs, vizDir = vizDir, vizStep = vizCycle, vizTime = vizTime, @@ -579,6 +625,7 @@ # Plot the final state. #------------------------------------------------------------------------------- if graphics: + from SpheralMatplotlib import * rPlot = plotNodePositions2d(db, colorNodeLists=0, colorDomains=1) rhoPlot, velPlot, epsPlot, PPlot, HPlot = plotRadialState(db) plotAnswer(answer, control.time(), @@ -591,30 +638,30 @@ (HPlot, "Sedov-cylindrical-h.png")] # Plot the specific entropy. 
- AsimData = Gnuplot.Data(xprof, A, - with_ = "points", - title = "Simulation", - inline = True) - AansData = Gnuplot.Data(xprof, Aans, - with_ = "lines", - title = "Solution", - inline = True) + # AsimData = Gnuplot.Data(xprof, A, + # with_ = "points", + # title = "Simulation", + # inline = True) + # AansData = Gnuplot.Data(xprof, Aans, + # with_ = "lines", + # title = "Solution", + # inline = True) - Aplot = generateNewGnuPlot() - Aplot.plot(AsimData) - Aplot.replot(AansData) - Aplot.title("Specific entropy") - Aplot.refresh() - plots.append((Aplot, "Sedov-cylindrical-entropy.png")) - - if boolCullenViscosity: - cullAlphaPlot = plotFieldList(q.ClMultiplier(), - xFunction = "%s.magnitude()", - plotStyle = "points", - winTitle = "Cullen alpha") - plots += [(cullAlphaPlot, "Sedov-planar-Cullen-alpha.png")] + # Aplot = generateNewGnuPlot() + # Aplot.plot(AsimData) + # Aplot.replot(AansData) + # Aplot.title("Specific entropy") + # Aplot.refresh() + # plots.append((Aplot, "Sedov-cylindrical-entropy.png")) + + # if boolCullenViscosity: + # cullAlphaPlot = plotFieldList(q.ClMultiplier(), + # xFunction = "%s.magnitude()", + # plotStyle = "points", + # winTitle = "Cullen alpha") + # plots += [(cullAlphaPlot, "Sedov-planar-Cullen-alpha.png")] # Make hardcopies of the plots. for p, filename in plots: - p.hardcopy(os.path.join(dataDir, filename), terminal="png") + p.figure.savefig(os.path.join(dataDir, filename)) diff --git a/tests/functional/Hydro/YeeVortex/YeeVortex.py b/tests/functional/Hydro/YeeVortex/YeeVortex.py index 404443b6c..0f7b51829 100644 --- a/tests/functional/Hydro/YeeVortex/YeeVortex.py +++ b/tests/functional/Hydro/YeeVortex/YeeVortex.py @@ -5,7 +5,7 @@ #------------------------------------------------------------------------------- # The Yee-Vortex Test #------------------------------------------------------------------------------- -import shutil +import shutil, os, sys, mpi from math import * from Spheral2d import * from SpheralTestUtilities import * @@ -13,9 +13,7 @@ from findLastRestart import * from GenerateNodeDistribution2d import * from CubicNodeGenerator import GenerateSquareNodeDistribution -from CentroidalVoronoiRelaxation import * - -import mpi +from CentroidalVoronoiRelaxation import * import DistributeNodes class YeeDensity: @@ -53,16 +51,17 @@ def __call__(self, r): #Center and radius of Vortex xc=0.0, yc=0.0, - rmax = 5.0, + rmax = 6.0, # How far should we measure the error norms? 
rmaxnorm = 5.0, # The number of radial points on the outside to force with constant BC - nbcrind = 10, + nbcrind = 6, #Vortex strength beta = 5.0, + #Tempurature at inf temp_inf = 1.0, @@ -70,19 +69,53 @@ def __call__(self, r): nRadial = 64, seed = "constantDTheta", - nPerh = 1.51, + # kernel options + KernelConstructor = WendlandC2Kernel, + nPerh = 3.01, + order = 7, + hmin = 1e-5, + hmax = 0.5, + hminratio = 0.1, + # hydros svph = False, crksph = False, fsisph = False, psph = False, + gsph = False, + mfm = False, + + # general hydro options asph = False, solid = False, + XSPH = False, + epsilonTensile = 0.0, + nTensile = 8, + densityUpdate = RigorousSumDensity, # VolumeScaledDensity, + compatibleEnergy = True, + evolveTotalEnergy = False, + correctVelocityGradient = True, + + # default SPH options + gradhCorrection = True, + + # CRKSPH options filter = 0.0, # For CRKSPH - KernelConstructor = NBSplineKernel, - order = 5, - Qconstructor = MonaghanGingoldViscosity, - #Qconstructor = TensorMonaghanGingoldViscosity, + + # PSPH options + HopkinsConductivity = False, + XPSH=False, + + # MFM/GSPH options + WaveSpeedConstructor = DavisWaveSpeed, # Einfeldt, Acoustic + LimiterConstructor = VanLeerLimiter, # VanLeer, Opsre, MinMod, VanAlba, Superbee + riemannLinearReconstruction = True, + riemannGradientType = SPHSameTimeGradient, # HydroAccelerationGradient, SPHGradient, RiemannGradient, MixedMethodGradient, SPHSameTimeGradient + + # artificial viscosity + Qconstructor = LimitedMonaghanGingoldViscosity, # TensorMonaghanGingoldViscosity, + Cl = 1.0, + Cq = 1.0, boolReduceViscosity = False, nhQ = 5.0, nhL = 10.0, @@ -99,25 +132,18 @@ def __call__(self, r): linearConsistent = False, fcentroidal = 0.0, fcellPressure = 0.0, - Cl = 1.0, - Cq = 0.75, linearInExpansion = False, Qlimiter = False, balsaraCorrection = False, epsilon2 = 1e-2, - hmin = 1e-5, - hmax = 0.5, - hminratio = 0.1, - cfl = 0.5, - XSPH = False, - epsilonTensile = 0.0, - nTensile = 8, - + + # integrator IntegratorConstructor = CheapSynchronousRK2Integrator, + cfl = 0.25, goalTime = 8.0, steps = None, - vizCycle = 20, - vizTime = 0.1, + vizCycle = None, + vizTime = 2.0, dt = 0.0001, dtMin = 1.0e-5, dtMax = 1.0, @@ -128,23 +154,17 @@ def __call__(self, r): domainIndependent = False, rigorousBoundaries = False, dtverbose = False, - - densityUpdate = RigorousSumDensity, # VolumeScaledDensity, - compatibleEnergy = True, - gradhCorrection = True, - HopkinsConductivity = False, # For PSPH - correctVelocityGradient = True, - evolveTotalEnergy = False, - XPSH=False, - + + # output useVoronoiOutput = False, clearDirectories = False, restoreCycle = -1, restartStep = 200, dataDir = "dumps-yeevortex-xy", graphics = True, - smooth = None, - outputFile = "None", + smooth = False, + outputFileBase = ".out", + convergenceFileBase = "xstaglattice_converge.txt", ) assert not(boolReduceViscosity and boolCullenViscosity) @@ -159,7 +179,12 @@ def __call__(self, r): elif psph: hydroname = "PSPH" elif fsisph: + Qconstructor = LimitedMonaghanGingoldViscosity hydroname = "FSISPH" +elif mfm: + hydroname = "MFM" +elif gsph: + hydroname = "GSPH" else: hydroname = "SPH" if asph: @@ -167,6 +192,13 @@ def __call__(self, r): if solid: hydroname = "solid"+hydroname + +if mfm or gsph: + convergenceFile = hydroname + "_" + str(densityUpdate) + "_" + str(riemannGradientType) + "_" + convergenceFileBase + outputFile = hydroname + "_" + str(densityUpdate) + "_" + str(riemannGradientType) + "_" + str(nRadial) + ".out" +else: + convergenceFile = 
hydroname+"_"+str(densityUpdate) + "_" + convergenceFileBase + outputFile = hydroname + "_" + str(densityUpdate) + "_" + str(nRadial) + ".out" #------------------------------------------------------------------------------- # Build our directory paths. #------------------------------------------------------------------------------- @@ -176,16 +208,18 @@ def __call__(self, r): SumVoronoiCellDensity : "SumVoronoiCellDensity"} baseDir = os.path.join(dataDir, hydroname, - Qconstructor.__name__, - KernelConstructor.__name__, - "Cl=%g_Cq=%g" % (Cl, Cq), - densityUpdateLabel[densityUpdate], - "compatibleEnergy=%s" % compatibleEnergy, - "Cullen=%s" % boolCullenViscosity, - "nPerh=%3.1f" % nPerh, - "fcentroidal=%f" % max(fcentroidal, filter), - "fcellPressure=%f" % fcellPressure, - "seed=%s" % seed, + #str(riemannGradientType), + #LimiterConstructor.__name__, + #Qconstructor.__name__, + #KernelConstructor.__name__, + #"Cl=%g_Cq=%g" % (Cl, Cq), + #densityUpdateLabel[densityUpdate], + #"compatibleEnergy=%s" % compatibleEnergy, + #"Cullen=%s" % boolCullenViscosity, + #"nPerh=%3.1f" % nPerh, + #"fcentroidal=%f" % max(fcentroidal, filter), + #"fcellPressure=%f" % fcellPressure, + #"seed=%s" % seed, str(nRadial)) restartDir = os.path.join(baseDir, "restarts") restartBaseName = os.path.join(restartDir, "yeevortex-xy-%i" % nRadial) @@ -199,7 +233,6 @@ def __call__(self, r): #------------------------------------------------------------------------------- # Check if the necessary output directories exist. If not, create them. #------------------------------------------------------------------------------- -import os, sys if mpi.rank == 0: if clearDirectories and os.path.exists(baseDir): shutil.rmtree(baseDir) @@ -216,7 +249,7 @@ def __call__(self, r): K = 1.0 eos = GammaLawGasMKS(gamma, mu) #eos = PolytropicEquationOfStateMKS(K,gamma,mu) - +YeeDensityFunc = YeeDensity(xc,yc,gamma,beta,temp_inf) #------------------------------------------------------------------------------- # Interpolation kernels. #------------------------------------------------------------------------------- @@ -250,10 +283,11 @@ def __call__(self, r): # Set the node properties. 
#------------------------------------------------------------------------------- rmaxbound = rmax + rmax/nRadial*nbcrind +print rmaxbound nr1 = nRadial + nbcrind -if seed == "lattice": +if seed == "lattice" or "xstaggeredLattice": generator = GenerateNodeDistribution2d(2*nr1, 2*nr1, - rho = YeeDensity(xc,yc,gamma,beta,temp_inf), + rho = YeeDensityFunc, distributionType = seed, xmin = (-rmaxbound, -rmaxbound), xmax = (rmaxbound, rmaxbound), @@ -263,8 +297,9 @@ def __call__(self, r): nNodePerh = nPerh, SPH = SPH) else: + generator = GenerateNodeDistribution2d(nr1, nr1, - rho = YeeDensity(xc,yc,gamma,beta,temp_inf), + rho = YeeDensityFunc, distributionType = seed, xmin = (-rmaxbound, -rmaxbound), xmax = (rmaxbound, rmaxbound), @@ -358,10 +393,15 @@ def __call__(self, r): densityUpdate = densityUpdate, HUpdate = HUpdate) elif fsisph: + if densityUpdate==RigorousSumDensity: + sumDensityNodeLists = [nodes] + else: + sumDensityNodeLists = [] hydro = FSISPH(dataBase = db, Q=q, W = WT, - cfl = cfl, + cfl = cfl, + sumDensityNodeLists = sumDensityNodeLists, densityStabilizationCoefficient = 0.1, specificThermalEnergyDiffusionCoefficient = 0.1, linearCorrectGradients = correctVelocityGradient, @@ -370,6 +410,40 @@ def __call__(self, r): ASPH = asph, epsTensile = epsilonTensile, nTensile = nTensile) +elif gsph: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,riemannLinearReconstruction) + hydro = GSPH(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + gradientType = riemannGradientType, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient=correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + XSPH = XSPH, + densityUpdate=densityUpdate, + HUpdate = IdealH, + epsTensile = epsilonTensile, + nTensile = nTensile) +elif mfm: + limiter = LimiterConstructor() + waveSpeed = WaveSpeedConstructor() + solver = HLLC(limiter,waveSpeed,riemannLinearReconstruction) + hydro = MFM(dataBase = db, + riemannSolver = solver, + W = WT, + cfl=cfl, + gradientType = riemannGradientType, + compatibleEnergyEvolution = compatibleEnergy, + correctVelocityGradient=correctVelocityGradient, + evolveTotalEnergy = evolveTotalEnergy, + XSPH = XSPH, + densityUpdate=densityUpdate, + HUpdate = IdealH, + epsTensile = epsilonTensile, + nTensile = nTensile) elif psph: hydro = PSPH(dataBase=db, W=WT, @@ -487,19 +561,23 @@ def __call__(self, r): #------------------------------------------------------------------------------- # Make the problem controller. 
#------------------------------------------------------------------------------- -# if useVoronoiOutput: -# import SpheralVoronoiSiloDump -# vizMethod = SpheralVoronoiSiloDump.dumpPhysicsState -# else: -# vizMethod = None # default +if useVoronoiOutput: + import SpheralVoronoiSiloDump + vizMethod = SpheralVoronoiSiloDump.dumpPhysicsState +else: + vizMethod = None # default +from SpheralPointmeshSiloDump import dumpPhysicsState + control = SpheralController(integrator, WT, initializeDerivatives = True, statsStep = statsStep, restartStep = restartStep, restartBaseName = restartBaseName, restoreCycle = restoreCycle, - #vizMethod = vizMethod, + #vizMethod = dumpPhysicsState, + #vizGhosts=True, vizBaseName = vizBaseName, + vizDerivs=True, vizDir = vizDir, vizStep = vizCycle, vizTime = vizTime, @@ -540,6 +618,7 @@ def __call__(self, r): if mpi.rank == 0: import numpy as np from Pnorm import Pnorm + rprof = np.array([sqrt(xi*xi + yi*yi) for xi, yi in zip(xprof, yprof)]) multiSort(rprof, mo, xprof, yprof, rhoprof, Pprof, vprof, epsprof, hprof,velx,vely) epsans = [] @@ -560,9 +639,13 @@ def __call__(self, r): rhoans.append(rhoi) velans.append(Vector(velxans,velyans).magnitude()) Pans.append(temp*rhoi) + L1rho2 =sum([abs(rhoprof[i]-rhoans[i]) for i in range(len(rhoans))])/len(rhoans) + Linfrho2 =max([abs(rhoprof[i]-rhoans[i]) for i in range(len(rhoans))])/len(rhoans) L1rho = Pnorm(rhoprof, rprof, rhoans).pnorm(1, rmin=0.0, rmax=rmaxnorm) + print (L1rho2,L1rho) L2rho = Pnorm(rhoprof, rprof, rhoans).pnorm(2, rmin=0.0, rmax=rmaxnorm) Linfrho = Pnorm(rhoprof, rprof, rhoans).pnorm("inf", rmin=0.0, rmax=rmaxnorm) + print (Linfrho2,Linfrho) L1eps = Pnorm(epsprof, rprof, epsans).pnorm(1, rmin=0.0, rmax=rmaxnorm) L2eps = Pnorm(epsprof, rprof, epsans).pnorm(2, rmin=0.0, rmax=rmaxnorm) Linfeps = Pnorm(epsprof, rprof, epsans).pnorm("inf", rmin=0.0, rmax=rmaxnorm) @@ -572,8 +655,11 @@ def __call__(self, r): L1P = Pnorm(Pprof, rprof, Pans).pnorm(1, rmin=0.0, rmax=rmaxnorm) L2P = Pnorm(Pprof, rprof, Pans).pnorm(2, rmin=0.0, rmax=rmaxnorm) LinfP = Pnorm(Pprof, rprof, velans).pnorm("inf", rmin=0.0, rmax=rmaxnorm) - with open("converge-CRK-%s-cullen-%s-PSPH-%s.txt" % (CRKSPH,boolCullenViscosity,PSPH), "a") as myfile: - myfile.write(("#" + 14*"%16s\t " + "%16s\n") % ("nRadial", "L1rho", "L1eps", "L1vel", "L2rho", "L2eps", "L2vel", "Linfrho", "Linfeps", "Linfvel", "L1P", "L2P", "LinfP", "cycles", "runtime")) + + isNewFile = not os.path.exists(convergenceFile) + with open(convergenceFile, "a") as myfile: + if isNewFile: + myfile.write(("#" + 14*"%16s\t " + "%16s\n") % ("nRadial", "L1rho", "L1eps", "L1vel", "L2rho", "L2eps", "L2vel", "Linfrho", "Linfeps", "Linfvel", "L1P", "L2P", "LinfP", "cycles", "runtime")) myfile.write((14*"%16s\t " + "%16s\n") % (nRadial, L1rho, L1eps, L1vel, L2rho, L2eps, L2vel, Linfrho, Linfeps, Linfvel, L1P, L2P, LinfP, control.totalSteps, control.stepTimer.elapsedTime)) f = open(outputFile, "w") f.write(("# " + 19*"%15s " + "\n") % ("r", "x", "y", "rho", "P", "v", "eps", "h", "mortonOrder", "rhoans", "epsans", "velans", diff --git a/tests/functional/Strength/TaylorImpact/TaylorImpact.py b/tests/functional/Strength/TaylorImpact/TaylorImpact.py index 08bc9519d..eaa1f3bc3 100644 --- a/tests/functional/Strength/TaylorImpact/TaylorImpact.py +++ b/tests/functional/Strength/TaylorImpact/TaylorImpact.py @@ -47,8 +47,6 @@ from Spheral import * from SpheralTestUtilities import * -from SpheralGnuPlotUtilities import * -from SpheralController import * 
 #-------------------------------------------------------------------------------
 # Identify ourselves!
@@ -84,7 +82,7 @@
     fsisph = False,
 
     # general hydro options
-    asph = False,          # Only for H evolution, not hydro algorithm
+    asph = True,           # Only for H evolution, not hydro algorithm
     HUpdate = IdealH,
     densityUpdate = IntegrateDensity,
    compatibleEnergy = True,
diff --git a/tests/integration.ats b/tests/integration.ats
index 6d9ef1f62..2cfc647eb 100644
--- a/tests/integration.ats
+++ b/tests/integration.ats
@@ -92,6 +92,7 @@ source("functional/Gravity/ApproximatePolyhedralGravityModel.py")
 
 # Strength tests.
 #source("functional/Strength/PlateImpact/PlateImpact-1d.py")
+source("functional/Strength/TaylorImpact/TaylorImpact.py")
 source("functional/Strength/CollidingPlates/CollidingPlates-1d.py")
 source("functional/Strength/DiametralCompression/DiametralCompression.py")
 source("functional/Strength/Verney/Verney-spherical.py")
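
Editor's note: the updated RT, Sedov, and YeeVortex scripts all share the same selection pattern: boolean command-line flags pick at most one hydro, a run label is built up for the output directory, and the artificial viscosity is skipped for the Riemann-solver-based hydros (GSPH/MFM). Below is a minimal standalone sketch of just that bookkeeping, so the pattern can be exercised without a Spheral install. The function name select_hydro and the example calls are illustrative only; the real scripts pass the flags straight to Spheral's CRKSPH/PSPH/FSISPH/GSPH/MFM/SPH constructors.

# Sketch of the hydro-selection bookkeeping used by the updated test scripts.
def select_hydro(svph=False, crksph=False, psph=False, fsisph=False,
                 gsph=False, mfm=False, asph=False, solid=False):
    # At most one hydro flag may be set, mirroring the asserts added in the diff.
    assert sum([svph, crksph, psph, fsisph, gsph, mfm]) <= 1

    hydroname = "SPH"
    useArtificialViscosity = True
    if svph:
        hydroname = "SVPH"
    elif crksph:
        hydroname = "CRK" + hydroname
    elif psph:
        hydroname = "P" + hydroname
    elif fsisph:
        hydroname = "FSI" + hydroname
    elif gsph:
        hydroname = "G" + hydroname
        useArtificialViscosity = False   # GSPH uses a Riemann solver instead of Q
    elif mfm:
        hydroname = "MFM"
        useArtificialViscosity = False   # MFM likewise needs no artificial viscosity
    if asph:
        hydroname = "A" + hydroname      # ASPH only changes the H evolution
    if solid:
        hydroname = "solid" + hydroname
    return hydroname, useArtificialViscosity

if __name__ == "__main__":
    print(select_hydro(crksph=True, solid=True))   # ('solidCRKSPH', True)
    print(select_hydro(gsph=True, asph=True))      # ('AGSPH', False)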
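Editor's note: YeeVortex.py now accumulates its error norms into a per-hydro convergence file, writing the header row only when the file does not yet exist so that repeated runs at different resolutions build one table. The sketch below reproduces that append-with-header idiom in isolation; the file name, column set, and numeric arguments in the usage line are illustrative placeholders, not the script's exact output.

# Append-with-header idiom for a convergence table (illustrative columns).
import os

def append_convergence_row(path, nRadial, L1rho, L2rho, Linfrho):
    is_new_file = not os.path.exists(path)
    with open(path, "a") as f:
        if is_new_file:
            # Header is written exactly once, when the file is first created.
            f.write(("#" + 3*"%16s\t " + "%16s\n") % ("nRadial", "L1rho", "L2rho", "Linfrho"))
        f.write((3*"%16s\t " + "%16s\n") % (nRadial, L1rho, L2rho, Linfrho))

# Example usage with dummy numbers:
append_convergence_row("SPH_IntegrateDensity_converge.txt", 64, 1.2e-3, 2.5e-3, 9.8e-3)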
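Editor's note: the new debugging lines in YeeVortex.py compare hand-rolled norms against Spheral's Pnorm helper. For reference, here is a self-contained sketch of the plain discrete L1, L2, and L-infinity error norms of a profile against an analytic answer. This is the simple unweighted version and does not reproduce whatever radial weighting Pnorm applies; the two short profiles in the usage line are dummy data purely for illustration.

# Plain discrete error norms (no Spheral or Pnorm dependency).
from math import sqrt

def error_norms(profile, answer):
    errs = [abs(p - a) for p, a in zip(profile, answer)]
    n = len(errs)
    L1 = sum(errs)/n
    L2 = sqrt(sum(e*e for e in errs)/n)
    Linf = max(errs)    # sup norm: the maximum error, with no division by the sample count
    return L1, L2, Linf

print(error_norms([1.00, 0.98, 1.05], [1.0, 1.0, 1.0]))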